summaryrefslogtreecommitdiff
path: root/fail2ban/server
diff options
context:
space:
mode:
Diffstat (limited to 'fail2ban/server')
-rw-r--r--fail2ban/server/action.py451
-rw-r--r--fail2ban/server/actions.py306
-rw-r--r--fail2ban/server/banmanager.py50
-rw-r--r--fail2ban/server/database.py209
-rw-r--r--fail2ban/server/datedetector.py107
-rw-r--r--fail2ban/server/datetemplate.py59
-rw-r--r--fail2ban/server/failmanager.py42
-rw-r--r--fail2ban/server/failregex.py121
-rw-r--r--fail2ban/server/filter.py745
-rw-r--r--fail2ban/server/filtergamin.py22
-rw-r--r--fail2ban/server/filterpoll.py28
-rw-r--r--fail2ban/server/filterpyinotify.py57
-rw-r--r--fail2ban/server/filtersystemd.py203
-rw-r--r--fail2ban/server/ipdns.py353
-rw-r--r--fail2ban/server/jail.py14
-rw-r--r--fail2ban/server/jails.py8
-rw-r--r--fail2ban/server/jailthread.py26
-rw-r--r--fail2ban/server/mytime.py72
-rw-r--r--fail2ban/server/observer.py64
-rw-r--r--fail2ban/server/server.py193
-rw-r--r--fail2ban/server/strptime.py94
-rw-r--r--fail2ban/server/ticket.py71
-rw-r--r--fail2ban/server/transmitter.py160
-rw-r--r--fail2ban/server/utils.py51
24 files changed, 2487 insertions, 1019 deletions
diff --git a/fail2ban/server/action.py b/fail2ban/server/action.py
index ec9ad19a..16ff6621 100644
--- a/fail2ban/server/action.py
+++ b/fail2ban/server/action.py
@@ -30,13 +30,17 @@ import tempfile
import threading
import time
from abc import ABCMeta
-from collections import MutableMapping
+try:
+ from collections.abc import MutableMapping
+except ImportError:
+ from collections import MutableMapping
from .failregex import mapTag2Opt
-from .ipdns import asip, DNSUtils
+from .ipdns import DNSUtils
from .mytime import MyTime
from .utils import Utils
-from ..helpers import getLogger, _merge_copy_dicts, uni_string, substituteRecursiveTags, TAG_CRE, MAX_TAG_REPLACE_COUNT
+from ..helpers import getLogger, _merge_copy_dicts, \
+ splitwords, substituteRecursiveTags, uni_string, TAG_CRE, MAX_TAG_REPLACE_COUNT
# Gets the instance of the logger.
logSys = getLogger(__name__)
@@ -44,13 +48,14 @@ logSys = getLogger(__name__)
# Create a lock for running system commands
_cmd_lock = threading.Lock()
-# Todo: make it configurable resp. automatically set, ex.: `[ -f /proc/net/if_inet6 ] && echo 'yes' || echo 'no'`:
-allowed_ipv6 = True
+# Specifies whether IPv6 subsystem is available:
+allowed_ipv6 = DNSUtils.IPv6IsAllowed
# capture groups from filter for map to ticket data:
FCUSTAG_CRE = re.compile(r'<F-([A-Z0-9_\-]+)>'); # currently uppercase only
-CONDITIONAL_FAM_RE = re.compile(r"^(\w+)\?(family)=")
+COND_FAMILIES = ('inet4', 'inet6')
+CONDITIONAL_FAM_RE = re.compile(r"^(\w+)\?(family)=(.*)$")
# Special tags:
DYN_REPL_TAGS = {
@@ -173,7 +178,7 @@ class CallingMap(MutableMapping, object):
def __len__(self):
return len(self.data)
- def copy(self): # pragma: no cover
+ def copy(self):
return self.__class__(_merge_copy_dicts(self.data, self.storage))
@@ -215,6 +220,7 @@ class ActionBase(object):
"start",
"stop",
"ban",
+ "reban",
"unban",
)
for method in required:
@@ -248,6 +254,17 @@ class ActionBase(object):
"""
pass
+ def reban(self, aInfo): # pragma: no cover - abstract
+ """Executed when a ban occurs.
+
+ Parameters
+ ----------
+ aInfo : dict
+ Dictionary which includes information in relation to
+ the ban.
+ """
+ return self.ban(aInfo)
+
@property
def _prolongable(self): # pragma: no cover - abstract
return False
@@ -288,6 +305,7 @@ class CommandAction(ActionBase):
----------
actionban
actioncheck
+ actionreban
actionreload
actionrepair
actionstart
@@ -308,6 +326,7 @@ class CommandAction(ActionBase):
self.actionstart = ''
## Command executed when ticket gets banned.
self.actionban = ''
+ self.actionreban = ''
## Command executed when ticket gets removed.
self.actionunban = ''
## Command executed in order to check requirements.
@@ -343,7 +362,7 @@ class CommandAction(ActionBase):
if wrp == 'ignore': # ignore (filter) dynamic parameters
return
elif wrp == 'str2seconds':
- value = str(MyTime.str2seconds(value))
+ value = MyTime.str2seconds(value)
# parameters changed - clear properties and substitution cache:
self.__properties = None
self.__substCache.clear()
@@ -352,6 +371,8 @@ class CommandAction(ActionBase):
# set:
self.__dict__[name] = value
+ __setitem__ = __setattr__
+
def __delattr__(self, name):
if not name.startswith('_'):
# parameters changed - clear properties and substitution cache:
@@ -375,8 +396,8 @@ class CommandAction(ActionBase):
self.__properties = dict(
(key, getattr(self, key))
for key in dir(self)
- if not key.startswith("_") and not callable(getattr(self, key)))
- #
+ if not key.startswith("_") and not callable(getattr(self, key))
+ )
return self.__properties
@property
@@ -384,10 +405,43 @@ class CommandAction(ActionBase):
return self.__substCache
def _getOperation(self, tag, family):
- return self.replaceTag(tag, self._properties,
- conditional=('family=' + family), cache=self.__substCache)
+ # replace operation tag (interpolate all values), be sure family is enclosed as conditional value
+ # (as lambda in addrepl so only if not overwritten in action):
+ cmd = self.replaceTag(tag, self._properties,
+ conditional=('family='+family if family else ''),
+ cache=self.__substCache)
+ if not family or '<' not in cmd: return cmd
+ # replace family as dynamic tags, important - don't cache, no recursion and auto-escape here:
+ cmd = self.replaceDynamicTags(cmd, {'family':family})
+ return cmd
+
+ def _operationExecuted(self, tag, family, *args):
+ """ Get, set or delete command of operation considering family.
+ """
+ key = ('__eOpCmd',tag)
+ if not len(args): # get
+ if not callable(family): # pragma: no cover
+ return self.__substCache.get(key, {}).get(family)
+ # family as expression - use it to filter values:
+ return [v for f, v in self.__substCache.get(key, {}).iteritems() if family(f)]
+ cmd = args[0]
+ if cmd: # set:
+ try:
+ famd = self.__substCache[key]
+ except KeyError:
+ famd = self.__substCache[key] = {}
+ famd[family] = cmd
+ else: # delete (given family and all other with same command):
+ try:
+ famd = self.__substCache[key]
+ cmd = famd.pop(family)
+ for family, v in famd.items():
+ if v == cmd:
+ del famd[family]
+ except KeyError: # pragma: no cover
+ pass
- def _executeOperation(self, tag, operation, family=[]):
+ def _executeOperation(self, tag, operation, family=[], afterExec=None):
"""Executes the operation commands (like "actionstart", "actionstop", etc).
Replace the tags in the action command with actions properties
@@ -395,54 +449,106 @@ class CommandAction(ActionBase):
"""
# check valid tags in properties (raises ValueError if self recursion, etc.):
res = True
- try:
- # common (resp. ipv4):
- startCmd = None
- if not family or 'inet4' in family:
- startCmd = self._getOperation(tag, 'inet4')
- if startCmd:
- res &= self.executeCmd(startCmd, self.timeout)
- # start ipv6 actions if available:
- if allowed_ipv6 and (not family or 'inet6' in family):
- startCmd6 = self._getOperation(tag, 'inet6')
- if startCmd6 and startCmd6 != startCmd:
- res &= self.executeCmd(startCmd6, self.timeout)
- if not res:
- raise RuntimeError("Error %s action %s/%s" % (operation, self._jail, self._name,))
- except ValueError as e:
- raise RuntimeError("Error %s action %s/%s: %r" % (operation, self._jail, self._name, e))
-
- COND_FAMILIES = ('inet4', 'inet6')
+ err = 'Script error'
+ if not family: # all started:
+ family = [famoper for (famoper,v) in self.__started.iteritems() if v]
+ for famoper in family:
+ try:
+ cmd = self._getOperation(tag, famoper)
+ ret = True
+ # avoid double execution of same command for both families:
+ if cmd and cmd not in self._operationExecuted(tag, lambda f: f != famoper):
+ realCmd = cmd
+ if self._jail:
+ # simulate action info with "empty" ticket:
+ aInfo = getattr(self._jail.actions, 'actionInfo', None)
+ if not aInfo:
+ aInfo = self._jail.actions._getActionInfo(None)
+ setattr(self._jail.actions, 'actionInfo', aInfo)
+ aInfo['time'] = MyTime.time()
+ aInfo['family'] = famoper
+ # replace dynamical tags, important - don't cache, no recursion and auto-escape here
+ realCmd = self.replaceDynamicTags(cmd, aInfo)
+ ret = self.executeCmd(realCmd, self.timeout)
+ res &= ret
+ if afterExec: afterExec(famoper, ret)
+ self._operationExecuted(tag, famoper, cmd if ret else None)
+ except ValueError as e:
+ res = False
+ err = e
+ if not res:
+ raise RuntimeError("Error %s action %s/%s: %r" % (operation, self._jail, self._name, err))
+ return res
+
+ @property
+ def _hasCondSection(self):
+ v = self._properties.get('__hasCondSection')
+ if v is not None:
+ return v
+ v = False
+ for n in self._properties:
+ if CONDITIONAL_FAM_RE.match(n):
+ v = True
+ break
+ self._properties['__hasCondSection'] = v
+ return v
+
+ @property
+ def _families(self):
+ v = self._properties.get('__families')
+ if v: return v
+ v = self._properties.get('families')
+ if v and not isinstance(v, (list,set)): # pragma: no cover - still unused
+ v = splitwords(v)
+ elif self._hasCondSection: # all conditional families:
+ # todo: check it is needed at all # common (resp. ipv4) + ipv6 if allowed:
+ v = ['inet4', 'inet6'] if allowed_ipv6() else ['inet4']
+ else: # all action tags seems to be the same
+ v = ['']
+ self._properties['__families'] = v
+ return v
@property
def _startOnDemand(self):
"""Checks the action depends on family (conditional)"""
v = self._properties.get('actionstart_on_demand')
- if v is None:
- v = False
- for n in self._properties:
- if CONDITIONAL_FAM_RE.match(n):
- v = True
- break
+ if v is not None:
+ return v
+ # not set - auto-recognize (depending on conditional):
+ v = self._hasCondSection
self._properties['actionstart_on_demand'] = v
return v
- def start(self, family=[]):
+ def start(self):
+ """Executes the "actionstart" command.
+
+ Replace the tags in the action command with actions properties
+ and executes the resulting command.
+ """
+ return self._start()
+
+ def _start(self, family=None, forceStart=False):
"""Executes the "actionstart" command.
Replace the tags in the action command with actions properties
and executes the resulting command.
"""
- if not family:
- # check the action depends on family (conditional):
- if self._startOnDemand:
+ # check the action depends on family (conditional):
+ if self._startOnDemand:
+ if not forceStart:
return True
- elif self.__started.get(family): # pragma: no cover - normally unreachable
+ elif not forceStart and self.__started.get(family): # pragma: no cover - normally unreachable
return True
- return self._executeOperation('<actionstart>', 'starting', family=family)
+ family = [family] if family is not None else self._families
+ def _started(family, ret):
+ if ret:
+ self._operationExecuted('<actionstop>', family, None)
+ self.__started[family] = 1
+ ret = self._executeOperation('<actionstart>', 'starting', family=family, afterExec=_started)
+ return ret
- def ban(self, aInfo):
- """Executes the "actionban" command.
+ def ban(self, aInfo, cmd='<actionban>'):
+ """Executes the given command ("actionban" or "actionreban").
Replaces the tags in the action command with actions properties
and ban information, and executes the resulting command.
@@ -454,21 +560,14 @@ class CommandAction(ActionBase):
the ban.
"""
# if we should start the action on demand (conditional by family):
+ family = aInfo.get('family', '')
if self._startOnDemand:
- family = aInfo.get('family')
if not self.__started.get(family):
- self.start(family)
- self.__started[family] = 1
- # mark also another families as "started" (-1), if they are equal
- # (on demand, but the same for ipv4 and ipv6):
- cmd = self._getOperation('<actionstart>', family)
- for f in CommandAction.COND_FAMILIES:
- if f != family and not self.__started.get(f):
- if cmd == self._getOperation('<actionstart>', f):
- self.__started[f] = -1
+ self._start(family, forceStart=True)
# ban:
- if not self._processCmd('<actionban>', aInfo):
+ if not self._processCmd(cmd, aInfo):
raise RuntimeError("Error banning %(ip)s" % aInfo)
+ self.__started[family] = self.__started.get(family, 0) | 3; # started and contains items
@property
def _prolongable(self):
@@ -502,8 +601,25 @@ class CommandAction(ActionBase):
Dictionary which includes information in relation to
the ban.
"""
- if not self._processCmd('<actionunban>', aInfo):
- raise RuntimeError("Error unbanning %(ip)s" % aInfo)
+ family = aInfo.get('family', '')
+ if self.__started.get(family, 0) & 2: # contains items
+ if not self._processCmd('<actionunban>', aInfo):
+ raise RuntimeError("Error unbanning %(ip)s" % aInfo)
+
+ def reban(self, aInfo):
+ """Executes the "actionreban" command if available, otherwise simply repeat "actionban".
+
+ Replaces the tags in the action command with actions properties
+ and ban information, and executes the resulting command.
+
+ Parameters
+ ----------
+ aInfo : dict
+ Dictionary which includes information in relation to
+ the ban.
+ """
+ # re-ban:
+ return self.ban(aInfo, '<actionreban>' if self.actionreban else '<actionban>')
def flush(self):
"""Executes the "actionflush" command.
@@ -514,15 +630,15 @@ class CommandAction(ActionBase):
Replaces the tags in the action command with actions properties
and executes the resulting command.
"""
- family = []
- # collect started families, if started on demand (conditional):
- if self._startOnDemand:
- for f in CommandAction.COND_FAMILIES:
- if self.__started.get(f) == 1: # only real started:
- family.append(f)
- # if no started (on demand) actions:
- if not family: return True
- return self._executeOperation('<actionflush>', 'flushing', family=family)
+ # collect started families, may be started on demand (conditional):
+ family = [f for (f,v) in self.__started.iteritems() if v & 3 == 3]; # started and contains items
+ # if nothing contains items:
+ if not family: return True
+ # flush:
+ def _afterFlush(family, ret):
+ if ret and self.__started.get(family):
+ self.__started[family] &= ~2; # no items anymore
+ return self._executeOperation('<actionflush>', 'flushing', family=family, afterExec=_afterFlush)
def stop(self):
"""Executes the "actionstop" command.
@@ -530,16 +646,30 @@ class CommandAction(ActionBase):
Replaces the tags in the action command with actions properties
and executes the resulting command.
"""
- family = []
+ return self._stop()
+
+ def _stop(self, family=None):
+ """Executes the "actionstop" command.
+
+ Replaces the tags in the action command with actions properties
+ and executes the resulting command.
+ """
# collect started families, if started on demand (conditional):
- if self._startOnDemand:
- for f in CommandAction.COND_FAMILIES:
- if self.__started.get(f) == 1: # only real started:
- family.append(f)
- self.__started[f] = 0
+ if family is None:
+ family = [f for (f,v) in self.__started.iteritems() if v]
# if no started (on demand) actions:
if not family: return True
- return self._executeOperation('<actionstop>', 'stopping', family=family)
+ self.__started = {}
+ else:
+ try:
+ self.__started[family] &= 0
+ family = [family]
+ except KeyError: # pragma: no cover
+ return True
+ def _stopped(family, ret):
+ if ret:
+ self._operationExecuted('<actionstart>', family, None)
+ return self._executeOperation('<actionstop>', 'stopping', family=family, afterExec=_stopped)
def reload(self, **kwargs):
"""Executes the "actionreload" command.
@@ -554,6 +684,20 @@ class CommandAction(ActionBase):
"""
return self._executeOperation('<actionreload>', 'reloading')
+ def consistencyCheck(self, beforeRepair=None):
+ """Executes the invariant check with repair if expected (conditional).
+ """
+ ret = True
+ # for each started family:
+ if self.actioncheck:
+ for (family, started) in self.__started.items():
+ if started and not self._invariantCheck(family, beforeRepair):
+ # reset started flag and command of executed operation:
+ self.__started[family] = 0
+ self._operationExecuted('<actionstart>', family, None)
+ ret &= False
+ return ret
+
ESCAPE_CRE = re.compile(r"""[\\#&;`|*?~<>^()\[\]{}$'"\n\r]""")
@classmethod
@@ -586,7 +730,7 @@ class CommandAction(ActionBase):
return value
@classmethod
- def replaceTag(cls, query, aInfo, conditional='', cache=None):
+ def replaceTag(cls, query, aInfo, conditional='', addrepl=None, cache=None):
"""Replaces tags in `query` with property values.
Parameters
@@ -627,7 +771,8 @@ class CommandAction(ActionBase):
pass
# interpolation of dictionary:
if subInfo is None:
- subInfo = substituteRecursiveTags(aInfo, conditional, ignore=cls._escapedTags)
+ subInfo = substituteRecursiveTags(aInfo, conditional, ignore=cls._escapedTags,
+ addrepl=addrepl)
# cache if possible:
if csubkey is not None:
cache[csubkey] = subInfo
@@ -678,7 +823,7 @@ class CommandAction(ActionBase):
ESCAPE_VN_CRE = re.compile(r"\W")
@classmethod
- def replaceDynamicTags(cls, realCmd, aInfo):
+ def replaceDynamicTags(cls, realCmd, aInfo, escapeVal=None):
"""Replaces dynamical tags in `query` with property values.
**Important**
@@ -703,16 +848,17 @@ class CommandAction(ActionBase):
# array for escaped vars:
varsDict = dict()
- def escapeVal(tag, value):
- # if the value should be escaped:
- if cls.ESCAPE_CRE.search(value):
- # That one needs to be escaped since its content is
- # out of our control
- tag = 'f2bV_%s' % cls.ESCAPE_VN_CRE.sub('_', tag)
- varsDict[tag] = value # add variable
- value = '$'+tag # replacement as variable
- # replacement for tag:
- return value
+ if not escapeVal:
+ def escapeVal(tag, value):
+ # if the value should be escaped:
+ if cls.ESCAPE_CRE.search(value):
+ # That one needs to be escaped since its content is
+ # out of our control
+ tag = 'f2bV_%s' % cls.ESCAPE_VN_CRE.sub('_', tag)
+ varsDict[tag] = value # add variable
+ value = '$'+tag # replacement as variable
+ # replacement for tag:
+ return value
# additional replacement as calling map:
ADD_REPL_TAGS_CM = CallingMap(ADD_REPL_TAGS)
@@ -736,7 +882,7 @@ class CommandAction(ActionBase):
tickData = aInfo.get("F-*")
if not tickData: tickData = {}
def substTag(m):
- tag = mapTag2Opt(m.groups()[0])
+ tag = mapTag2Opt(m.group(1))
try:
value = uni_string(tickData[tag])
except KeyError:
@@ -750,7 +896,58 @@ class CommandAction(ActionBase):
realCmd = Utils.buildShellCmd(realCmd, varsDict)
return realCmd
- def _processCmd(self, cmd, aInfo=None, conditional=''):
+ @property
+ def banEpoch(self):
+ return getattr(self, '_banEpoch', 0)
+ def invalidateBanEpoch(self):
+ """Increments ban epoch of jail and this action, so already banned tickets would cause
+ a re-ban for all tickets with previous epoch."""
+ if self._jail is not None:
+ self._banEpoch = self._jail.actions.banEpoch = self._jail.actions.banEpoch + 1
+ else:
+ self._banEpoch = self.banEpoch + 1
+
+ def _invariantCheck(self, family=None, beforeRepair=None, forceStart=True):
+ """Executes a substituted `actioncheck` command.
+ """
+ # for started action/family only (avoid check not started inet4 if inet6 gets broken):
+ if not forceStart and family is not None and family not in self.__started:
+ return 1
+ checkCmd = self._getOperation('<actioncheck>', family)
+ if not checkCmd or self.executeCmd(checkCmd, self.timeout):
+ return 1
+ # if don't need repair/restore - just return:
+ if beforeRepair and not beforeRepair():
+ return -1
+ self._logSys.error(
+ "Invariant check failed. Trying to restore a sane environment")
+ # increment ban epoch of jail and this action (allows re-ban on already banned):
+ self.invalidateBanEpoch()
+ # try to find repair command, if exists - exec it:
+ repairCmd = self._getOperation('<actionrepair>', family)
+ if repairCmd:
+ if not self.executeCmd(repairCmd, self.timeout):
+ self.__started[family] = 0
+ self._logSys.critical("Unable to restore environment")
+ return 0
+ self.__started[family] = 1
+ else:
+ # no repair command, try to restart action...
+ # [WARNING] TODO: be sure all banactions get a repair command, because
+ # otherwise stop/start will theoretically remove all the bans,
+ # but the tickets are still in BanManager, so in case of new failures
+ # it will not be banned, because "already banned" will happen.
+ try:
+ self._stop(family)
+ except RuntimeError: # bypass error in stop (if start/check succeeded hereafter).
+ pass
+ self._start(family, forceStart=forceStart or not self._startOnDemand)
+ if self.__started.get(family) and not self.executeCmd(checkCmd, self.timeout):
+ self._logSys.critical("Unable to restore environment")
+ return 0
+ return 1
+
+ def _processCmd(self, cmd, aInfo=None):
"""Executes a command with preliminary checks and substitutions.
Before executing any commands, executes the "check" command first
@@ -775,55 +972,43 @@ class CommandAction(ActionBase):
return True
# conditional corresponding family of the given ip:
- if conditional == '':
- conditional = 'family=inet4'
- if allowed_ipv6:
- try:
- ip = aInfo["ip"]
- if ip and asip(ip).isIPv6:
- conditional = 'family=inet6'
- except KeyError:
- pass
+ try:
+ family = aInfo["family"]
+ except (KeyError, TypeError):
+ family = ''
+
+ repcnt = 0
+ while True:
- checkCmd = self.replaceTag('<actioncheck>', self._properties,
- conditional=conditional, cache=self.__substCache)
- if checkCmd:
- if not self.executeCmd(checkCmd, self.timeout):
- self._logSys.error(
- "Invariant check failed. Trying to restore a sane environment")
- # try to find repair command, if exists - exec it:
- repairCmd = self.replaceTag('<actionrepair>', self._properties,
- conditional=conditional, cache=self.__substCache)
- if repairCmd:
- if not self.executeCmd(repairCmd, self.timeout):
- self._logSys.critical("Unable to restore environment")
+ # got some error, do invariant check:
+ if repcnt and self.actioncheck:
+ # don't repair/restore if unban (no matter):
+ def _beforeRepair():
+ if cmd == '<actionunban>' and not self._properties.get('actionrepair_on_unban'):
+ self._logSys.error("Invariant check failed. Unban is impossible.")
return False
- else:
- # no repair command, try to restart action...
- # [WARNING] TODO: be sure all banactions get a repair command, because
- # otherwise stop/start will theoretically remove all the bans,
- # but the tickets are still in BanManager, so in case of new failures
- # it will not be banned, because "already banned" will happen.
- try:
- self.stop()
- except RuntimeError: # bypass error in stop (if start/check succeeded hereafter).
- pass
- self.start()
- if not self.executeCmd(checkCmd, self.timeout):
- self._logSys.critical("Unable to restore environment")
+ return True
+ # check and repair if broken:
+ ret = self._invariantCheck(family, _beforeRepair, forceStart=(cmd != '<actionunban>'))
+ # if not sane (and not restored) return:
+ if ret != 1:
return False
- # Replace static fields
- realCmd = self.replaceTag(cmd, self._properties,
- conditional=conditional, cache=self.__substCache)
+ # Replace static fields
+ realCmd = self.replaceTag(cmd, self._properties,
+ conditional=('family='+family if family else ''), cache=self.__substCache)
- # Replace dynamical tags, important - don't cache, no recursion and auto-escape here
- if aInfo is not None:
- realCmd = self.replaceDynamicTags(realCmd, aInfo)
- else:
- realCmd = cmd
+ # Replace dynamical tags, important - don't cache, no recursion and auto-escape here
+ if aInfo is not None:
+ realCmd = self.replaceDynamicTags(realCmd, aInfo)
+ else:
+ realCmd = cmd
- return self.executeCmd(realCmd, self.timeout)
+ # try execute command:
+ ret = self.executeCmd(realCmd, self.timeout)
+ repcnt += 1
+ if ret or repcnt > 1:
+ return ret
@staticmethod
def executeCmd(realCmd, timeout=60, **kwargs):
@@ -848,7 +1033,7 @@ class CommandAction(ActionBase):
RuntimeError
If command execution times out.
"""
- if logSys.getEffectiveLevel() < logging.DEBUG: # pragma: no cover
+ if logSys.getEffectiveLevel() < logging.DEBUG:
logSys.log(9, realCmd)
if not realCmd:
logSys.debug("Nothing to do")
diff --git a/fail2ban/server/actions.py b/fail2ban/server/actions.py
index 3d862275..fa045ab5 100644
--- a/fail2ban/server/actions.py
+++ b/fail2ban/server/actions.py
@@ -28,13 +28,14 @@ import logging
import os
import sys
import time
-from collections import Mapping
try:
- from collections import OrderedDict
+ from collections.abc import Mapping
except ImportError:
- OrderedDict = dict
+ from collections import Mapping
+from collections import OrderedDict
from .banmanager import BanManager, BanTicket
+from .ipdns import IPAddr
from .jailthread import JailThread
from .action import ActionBase, CommandAction, CallingMap
from .mytime import MyTime
@@ -75,12 +76,18 @@ class Actions(JailThread, Mapping):
"""
def __init__(self, jail):
- JailThread.__init__(self)
+ JailThread.__init__(self, name="f2b/a."+jail.name)
## The jail which contains this action.
self._jail = jail
self._actions = OrderedDict()
## The ban manager.
- self.__banManager = BanManager()
+ self.banManager = BanManager()
+ self.banEpoch = 0
+ self.__lastConsistencyCheckTM = 0
+ ## Precedence of ban (over unban), so max number of tickets banned (to call an unban check):
+ self.banPrecedence = 10
+ ## Max count of outdated tickets to unban per each __checkUnBan operation:
+ self.unbanMaxCount = self.banPrecedence * 2
@staticmethod
def _load_python_module(pythonModule):
@@ -156,8 +163,8 @@ class Actions(JailThread, Mapping):
delacts = OrderedDict((name, action) for name, action in self._actions.iteritems()
if name not in self._reload_actions)
if len(delacts):
- # unban all tickets using remove action only:
- self.__flushBan(db=False, actions=delacts)
+ # unban all tickets using removed actions only:
+ self.__flushBan(db=False, actions=delacts, stop=True)
# stop and remove it:
self.stopActions(actions=delacts)
delattr(self, '_reload_actions')
@@ -193,7 +200,7 @@ class Actions(JailThread, Mapping):
def setBanTime(self, value):
value = MyTime.str2seconds(value)
- self.__banManager.setBanTime(value)
+ self.banManager.setBanTime(value)
logSys.info(" banTime: %s" % value)
##
@@ -202,7 +209,38 @@ class Actions(JailThread, Mapping):
# @return the time
def getBanTime(self):
- return self.__banManager.getBanTime()
+ return self.banManager.getBanTime()
+
+ def getBanned(self, ids):
+ lst = self.banManager.getBanList()
+ if not ids:
+ return lst
+ if len(ids) == 1:
+ return 1 if ids[0] in lst else 0
+ return map(lambda ip: 1 if ip in lst else 0, ids)
+
+ def getBanList(self, withTime=False):
+ """Returns the list of banned IP addresses.
+
+ Returns
+ -------
+ list
+ The list of banned IP addresses.
+ """
+ return self.banManager.getBanList(ordered=True, withTime=withTime)
+
+ def addBannedIP(self, ip):
+ """Ban an IP or list of IPs."""
+ unixTime = MyTime.time()
+
+ if isinstance(ip, list):
+ # Multiple IPs:
+ tickets = (BanTicket(ip, unixTime) for ip in ip)
+ else:
+ # Single IP:
+ tickets = (BanTicket(ip, unixTime),)
+
+ return self.__checkBan(tickets)
def removeBannedIP(self, ip=None, db=True, ifexists=False):
"""Removes banned IP calling actions' unban method
@@ -212,8 +250,8 @@ class Actions(JailThread, Mapping):
Parameters
----------
- ip : str or IPAddr or None
- The IP address to unban or all IPs if None
+ ip : list, str, IPAddr or None
+ The IP address (or multiple IPs as list) to unban or all IPs if None
Raises
------
@@ -223,19 +261,42 @@ class Actions(JailThread, Mapping):
# Unban all?
if ip is None:
return self.__flushBan(db)
+ # Multiple IPs:
+ if isinstance(ip, (list, tuple)):
+ missed = []
+ cnt = 0
+ for i in ip:
+ try:
+ cnt += self.removeBannedIP(i, db, ifexists)
+ except ValueError:
+ if not ifexists:
+ missed.append(i)
+ if missed:
+ raise ValueError("not banned: %r" % missed)
+ return cnt
# Single IP:
# Always delete ip from database (also if currently not banned)
if db and self._jail.database is not None:
self._jail.database.delBan(self._jail, ip)
# Find the ticket with the IP.
- ticket = self.__banManager.getTicketByID(ip)
+ ticket = self.banManager.getTicketByID(ip)
if ticket is not None:
# Unban the IP.
self.__unBan(ticket)
else:
+ # Multiple IPs by subnet or dns:
+ if not isinstance(ip, IPAddr):
+ ipa = IPAddr(ip)
+ if not ipa.isSingle: # subnet (mask/cidr) or raw (may be dns/hostname):
+ ips = filter(ipa.contains, self.banManager.getBanList())
+ if ips:
+ return self.removeBannedIP(ips, db, ifexists)
+ # not found:
+ msg = "%s is not banned" % ip
+ logSys.log(logging.MSG, msg)
if ifexists:
return 0
- raise ValueError("%s is not banned" % ip)
+ raise ValueError(msg)
return 1
@@ -244,9 +305,7 @@ class Actions(JailThread, Mapping):
"""
if actions is None:
actions = self._actions
- revactions = actions.items()
- revactions.reverse()
- for name, action in revactions:
+ for name, action in reversed(actions.items()):
try:
action.stop()
except Exception as e:
@@ -268,6 +327,7 @@ class Actions(JailThread, Mapping):
bool
True when the thread exits nicely.
"""
+ cnt = 0
for name, action in self._actions.iteritems():
try:
action.start()
@@ -276,16 +336,34 @@ class Actions(JailThread, Mapping):
self._jail.name, name, e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
while self.active:
- if self.idle:
- logSys.debug("Actions: enter idle mode")
- Utils.wait_for(lambda: not self.active or not self.idle,
- lambda: False, self.sleeptime)
- logSys.debug("Actions: leave idle mode")
- continue
- if not Utils.wait_for(lambda: not self.active or self.__checkBan(), self.sleeptime):
- self.__checkUnBan()
-
- self.__flushBan()
+ try:
+ if self.idle:
+ logSys.debug("Actions: enter idle mode")
+ Utils.wait_for(lambda: not self.active or not self.idle,
+ lambda: False, self.sleeptime)
+ logSys.debug("Actions: leave idle mode")
+ continue
+ # wait for ban (stop if gets inactive, pending ban or unban):
+ bancnt = 0
+ wt = min(self.sleeptime, self.banManager._nextUnbanTime - MyTime.time())
+ logSys.log(5, "Actions: wait for pending tickets %s (default %s)", wt, self.sleeptime)
+ if Utils.wait_for(lambda: not self.active or self._jail.hasFailTickets, wt):
+ bancnt = self.__checkBan()
+ cnt += bancnt
+ # unban if nothing is banned not later than banned tickets >= banPrecedence
+ if not bancnt or cnt >= self.banPrecedence:
+ if self.active:
+ # let shrink the ban list faster
+ bancnt *= 2
+ logSys.log(5, "Actions: check-unban %s, bancnt %s, max: %s", bancnt if bancnt and bancnt < self.unbanMaxCount else self.unbanMaxCount, bancnt, self.unbanMaxCount)
+ self.__checkUnBan(bancnt if bancnt and bancnt < self.unbanMaxCount else self.unbanMaxCount)
+ cnt = 0
+ except Exception as e: # pragma: no cover
+ logSys.error("[%s] unhandled error in actions thread: %s",
+ self._jail.name, e,
+ exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
+
+ self.__flushBan(stop=True)
self.stopActions()
return True
@@ -314,7 +392,12 @@ class Actions(JailThread, Mapping):
"ipfailures": lambda self: self._mi4ip(True).getAttempt(),
"ipjailfailures": lambda self: self._mi4ip().getAttempt(),
# raw ticket info:
- "raw-ticket": lambda self: repr(self.__ticket)
+ "raw-ticket": lambda self: repr(self.__ticket),
+ # jail info:
+ "jail.banned": lambda self: self.__jail.actions.banManager.size(),
+ "jail.banned_total": lambda self: self.__jail.actions.banManager.getBanTotal(),
+ "jail.found": lambda self: self.__jail.filter.failManager.size(),
+ "jail.found_total": lambda self: self.__jail.filter.failManager.getFailTotal()
}
__slots__ = CallingMap.__slots__ + ('__ticket', '__jail', '__mi4ip')
@@ -332,7 +415,7 @@ class Actions(JailThread, Mapping):
def _getBanTime(self):
btime = self.__ticket.getBanTime()
if btime is None: btime = self.__jail.actions.getBanTime()
- return btime
+ return int(btime)
def _mi4ip(self, overalljails=False):
"""Gets bans merged once, a helper for lambda(s), prevents stop of executing action by any exception inside.
@@ -377,15 +460,26 @@ class Actions(JailThread, Mapping):
return mi[idx] if mi[idx] is not None else self.__ticket
- def __getActionInfo(self, ticket):
+ def _getActionInfo(self, ticket):
+ if not ticket:
+ ticket = BanTicket("", MyTime.time())
aInfo = Actions.ActionInfo(ticket, self._jail)
return aInfo
+ def __getFailTickets(self, count=100):
+ """Generator to get maximal count failure tickets from fail-manager."""
+ cnt = 0
+ while cnt < count:
+ ticket = self._jail.getFailTicket()
+ if not ticket:
+ break
+ yield ticket
+ cnt += 1
- def __checkBan(self):
+ def __checkBan(self, tickets=None):
"""Check for IP address to ban.
- Look in the jail queue for FailTicket. If a ticket is available,
+ If tickets are not specified look in the jail queue for FailTicket. If a ticket is available,
it executes the "ban" command and adds a ticket to the BanManager.
Returns
@@ -394,17 +488,17 @@ class Actions(JailThread, Mapping):
True if an IP address get banned.
"""
cnt = 0
- while cnt < 100:
- ticket = self._jail.getFailTicket()
- if not ticket:
- break
+ if not tickets:
+ tickets = self.__getFailTickets(self.banPrecedence)
+ rebanacts = None
+ for ticket in tickets:
bTicket = BanTicket.wrap(ticket)
- btime = ticket.getBanTime(self.__banManager.getBanTime())
- ip = bTicket.getIP()
- aInfo = self.__getActionInfo(bTicket)
+ btime = ticket.getBanTime(self.banManager.getBanTime())
+ ip = bTicket.getID()
+ aInfo = self._getActionInfo(bTicket)
reason = {}
- if self.__banManager.addBanTicket(bTicket, reason=reason):
+ if self.banManager.addBanTicket(bTicket, reason=reason):
cnt += 1
# report ticket to observer, to check time should be increased and hereafter observer writes ban to database (asynchronous)
if Observers.Main is not None and not bTicket.restored:
@@ -413,7 +507,7 @@ class Actions(JailThread, Mapping):
# do actions :
for name, action in self._actions.iteritems():
try:
- if ticket.restored and getattr(action, 'norestored', False):
+ if bTicket.restored and getattr(action, 'norestored', False):
continue
if not aInfo.immutable: aInfo.reset()
action.ban(aInfo)
@@ -425,6 +519,8 @@ class Actions(JailThread, Mapping):
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
# after all actions are processed set banned flag:
bTicket.banned = True
+ if self.banEpoch: # be sure tickets always have the same ban epoch (default 0):
+ bTicket.banEpoch = self.banEpoch
else:
if reason.get('expired', 0):
logSys.info('[%s] Ignore %s, expired bantime', self._jail.name, ip)
@@ -442,15 +538,67 @@ class Actions(JailThread, Mapping):
else logging.NOTICE if diftm < 60 \
else logging.WARNING
logSys.log(ll, "[%s] %s already banned", self._jail.name, ip)
+ # if long time after ban - do consistency check (something is wrong here):
+ if bTicket.banEpoch == self.banEpoch and diftm > 3:
+ # avoid too often checks:
+ if not rebanacts and MyTime.time() > self.__lastConsistencyCheckTM + 3:
+ self.__lastConsistencyCheckTM = MyTime.time()
+ for action in self._actions.itervalues():
+ if hasattr(action, 'consistencyCheck'):
+ action.consistencyCheck()
+ # check epoch in order to reban it:
+ if bTicket.banEpoch < self.banEpoch:
+ if not rebanacts: rebanacts = dict(
+ (name, action) for name, action in self._actions.iteritems()
+ if action.banEpoch > bTicket.banEpoch)
+ cnt += self.__reBan(bTicket, actions=rebanacts)
+ else: # pragma: no cover - unexpected: ticket is not banned for some reasons - reban using all actions:
+ cnt += self.__reBan(bTicket)
+ # add ban to database moved to observer (should previously check not already banned
+ # and increase ticket time if "bantime.increment" set)
if cnt:
logSys.debug("Banned %s / %s, %s ticket(s) in %r", cnt,
- self.__banManager.getBanTotal(), self.__banManager.size(), self._jail.name)
+ self.banManager.getBanTotal(), self.banManager.size(), self._jail.name)
return cnt
+ def __reBan(self, ticket, actions=None, log=True):
+ """Repeat bans for the ticket.
+
+ Executes the actions in order to reban the host given in the
+ ticket.
+
+ Parameters
+ ----------
+ ticket : Ticket
+ Ticket to reban
+ """
+ actions = actions or self._actions
+ ip = ticket.getID()
+ aInfo = self._getActionInfo(ticket)
+ if log:
+ logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % actions.keys()[0] if len(actions) == 1 else ''))
+ for name, action in actions.iteritems():
+ try:
+ logSys.debug("[%s] action %r: reban %s", self._jail.name, name, ip)
+ if not aInfo.immutable: aInfo.reset()
+ action.reban(aInfo)
+ except Exception as e:
+ logSys.error(
+ "Failed to execute reban jail '%s' action '%s' "
+ "info '%r': %s",
+ self._jail.name, name, aInfo, e,
+ exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
+ return 0
+ # after all actions are processed set banned flag:
+ ticket.banned = True
+ if self.banEpoch: # be sure tickets always have the same ban epoch (default 0):
+ ticket.banEpoch = self.banEpoch
+ return 1
+
def _prolongBan(self, ticket):
# prevent to prolong ticket that was removed in-between,
# if it in ban list - ban time already prolonged (and it stays there):
- if not self.__banManager._inBanList(ticket): return
+ if not self.banManager._inBanList(ticket): return
# do actions :
aInfo = None
for name, action in self._actions.iteritems():
@@ -460,7 +608,7 @@ class Actions(JailThread, Mapping):
if not action._prolongable:
continue
if aInfo is None:
- aInfo = self.__getActionInfo(ticket)
+ aInfo = self._getActionInfo(ticket)
if not aInfo.immutable: aInfo.reset()
action.prolong(aInfo)
except Exception as e:
@@ -470,21 +618,21 @@ class Actions(JailThread, Mapping):
self._jail.name, name, aInfo, e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
- def __checkUnBan(self):
+ def __checkUnBan(self, maxCount=None):
"""Check for IP address to unban.
Unban IP addresses which are outdated.
"""
- lst = self.__banManager.unBanList(MyTime.time())
+ lst = self.banManager.unBanList(MyTime.time(), maxCount)
for ticket in lst:
self.__unBan(ticket)
cnt = len(lst)
if cnt:
logSys.debug("Unbanned %s, %s ticket(s) in %r",
- cnt, self.__banManager.size(), self._jail.name)
+ cnt, self.banManager.size(), self._jail.name)
return cnt
- def __flushBan(self, db=False, actions=None):
+ def __flushBan(self, db=False, actions=None, stop=False):
"""Flush the ban list.
Unban all IP address which are still in the banning list.
@@ -495,31 +643,47 @@ class Actions(JailThread, Mapping):
log = True
if actions is None:
logSys.debug(" Flush ban list")
- lst = self.__banManager.flushBanList()
+ lst = self.banManager.flushBanList()
else:
log = False # don't log "[jail] Unban ..." if removing actions only.
- lst = iter(self.__banManager)
+ lst = iter(self.banManager)
cnt = 0
# first we'll execute flush for actions supporting this operation:
unbactions = {}
for name, action in (actions if actions is not None else self._actions).iteritems():
- if hasattr(action, 'flush') and action.actionflush:
- logSys.notice("[%s] Flush ticket(s) with %s", self._jail.name, name)
- action.flush()
- else:
- unbactions[name] = action
+ try:
+ if hasattr(action, 'flush') and (not isinstance(action, CommandAction) or action.actionflush):
+ logSys.notice("[%s] Flush ticket(s) with %s", self._jail.name, name)
+ if action.flush():
+ continue
+ except Exception as e:
+ logSys.error("Failed to flush bans in jail '%s' action '%s': %s",
+ self._jail.name, name, e,
+ exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
+ logSys.info("No flush occurred, do consistency check")
+ if hasattr(action, 'consistencyCheck'):
+ def _beforeRepair():
+ if stop and not getattr(action, 'actionrepair_on_unban', None): # don't need repair on stop
+ logSys.error("Invariant check failed. Flush is impossible.")
+ return False
+ return True
+ action.consistencyCheck(_beforeRepair)
+ continue
+ # fallback to single unbans:
+ logSys.debug(" Unban tickets each individualy")
+ unbactions[name] = action
actions = unbactions
# flush the database also:
if db and self._jail.database is not None:
logSys.debug(" Flush jail in database")
self._jail.database.delBan(self._jail)
- # unban each ticket with non-flasheable actions:
+		# unban each ticket with non-flushable actions:
for ticket in lst:
# unban ip:
self.__unBan(ticket, actions=actions, log=log)
cnt += 1
logSys.debug(" Unbanned %s, %s ticket(s) in %r",
- cnt, self.__banManager.size(), self._jail.name)
+ cnt, self.banManager.size(), self._jail.name)
return cnt
def __unBan(self, ticket, actions=None, log=True):
@@ -537,14 +701,12 @@ class Actions(JailThread, Mapping):
unbactions = self._actions
else:
unbactions = actions
- ip = ticket.getIP()
- aInfo = self.__getActionInfo(ticket)
+ ip = ticket.getID()
+ aInfo = self._getActionInfo(ticket)
if log:
- logSys.notice("[%s] Unban %s", self._jail.name, aInfo["ip"])
+ logSys.notice("[%s] Unban %s", self._jail.name, ip)
for name, action in unbactions.iteritems():
try:
- if ticket.restored and getattr(action, 'norestored', False):
- continue
logSys.debug("[%s] action %r: unban %s", self._jail.name, name, ip)
if not aInfo.immutable: aInfo.reset()
action.unban(aInfo)
@@ -559,17 +721,23 @@ class Actions(JailThread, Mapping):
"""Status of current and total ban counts and current banned IP list.
"""
# TODO: Allow this list to be printed as 'status' output
- supported_flavors = ["basic", "cymru"]
+ supported_flavors = ["short", "basic", "cymru"]
if flavor is None or flavor not in supported_flavors:
logSys.warning("Unsupported extended jail status flavor %r. Supported: %s" % (flavor, supported_flavors))
# Always print this information (basic)
- ret = [("Currently banned", self.__banManager.size()),
- ("Total banned", self.__banManager.getBanTotal()),
- ("Banned IP list", self.__banManager.getBanList())]
+ if flavor != "short":
+ banned = self.banManager.getBanList()
+ cnt = len(banned)
+ else:
+ cnt = self.banManager.size()
+ ret = [("Currently banned", cnt),
+ ("Total banned", self.banManager.getBanTotal())]
+ if flavor != "short":
+ ret += [("Banned IP list", banned)]
if flavor == "cymru":
- cymru_info = self.__banManager.getBanListExtendedCymruInfo()
+ cymru_info = self.banManager.getBanListExtendedCymruInfo()
ret += \
- [("Banned ASN list", self.__banManager.geBanListExtendedASN(cymru_info)),
- ("Banned Country list", self.__banManager.geBanListExtendedCountry(cymru_info)),
- ("Banned RIR list", self.__banManager.geBanListExtendedRIR(cymru_info))]
+ [("Banned ASN list", self.banManager.geBanListExtendedASN(cymru_info)),
+ ("Banned Country list", self.banManager.geBanListExtendedCountry(cymru_info)),
+ ("Banned RIR list", self.banManager.geBanListExtendedRIR(cymru_info))]
return ret
diff --git a/fail2ban/server/banmanager.py b/fail2ban/server/banmanager.py
index 1340fb52..9168d5b8 100644
--- a/fail2ban/server/banmanager.py
+++ b/fail2ban/server/banmanager.py
@@ -57,7 +57,7 @@ class BanManager:
## Total number of banned IP address
self.__banTotal = 0
## The time for next unban process (for performance and load reasons):
- self.__nextUnbanTime = BanTicket.MAX_TIME
+ self._nextUnbanTime = BanTicket.MAX_TIME
##
# Set the ban time.
@@ -66,7 +66,6 @@ class BanManager:
# @param value the time
def setBanTime(self, value):
- with self.__lock:
self.__banTime = int(value)
##
@@ -76,7 +75,6 @@ class BanManager:
# @return the time
def getBanTime(self):
- with self.__lock:
return self.__banTime
##
@@ -85,7 +83,6 @@ class BanManager:
# @param value total number
def setBanTotal(self, value):
- with self.__lock:
self.__banTotal = value
##
@@ -94,7 +91,6 @@ class BanManager:
# @return the total number
def getBanTotal(self):
- with self.__lock:
return self.__banTotal
##
@@ -102,9 +98,22 @@ class BanManager:
#
# @return IP list
- def getBanList(self):
+ def getBanList(self, ordered=False, withTime=False):
+ if not ordered:
+ return list(self.__banList.keys())
with self.__lock:
- return self.__banList.keys()
+ lst = []
+ for ticket in self.__banList.itervalues():
+ eob = ticket.getEndOfBanTime(self.__banTime)
+ lst.append((ticket,eob))
+ lst.sort(key=lambda t: t[1])
+ t2s = MyTime.time2str
+ if withTime:
+ return ['%s \t%s + %d = %s' % (
+ t[0].getID(),
+ t2s(t[0].getTime()), t[0].getBanTime(self.__banTime), t2s(t[1])
+ ) for t in lst]
+ return [t[0].getID() for t in lst]
##
# Returns a iterator to ban list (used in reload, so idle).
@@ -112,8 +121,8 @@ class BanManager:
# @return ban list iterator
def __iter__(self):
- with self.__lock:
- return self.__banList.itervalues()
+ # ensure iterator is safe - traverse over the list in snapshot created within lock (GIL):
+ return iter(list(self.__banList.values()))
##
# Returns normalized value
@@ -284,8 +293,8 @@ class BanManager:
self.__banTotal += 1
ticket.incrBanCount()
# correct next unban time:
- if self.__nextUnbanTime > eob:
- self.__nextUnbanTime = eob
+ if self._nextUnbanTime > eob:
+ self._nextUnbanTime = eob
return True
##
@@ -314,27 +323,28 @@ class BanManager:
# @param time the time
# @return the list of ticket to unban
- def unBanList(self, time):
+ def unBanList(self, time, maxCount=0x7fffffff):
with self.__lock:
- # Permanent banning
- if self.__banTime < 0:
- return list()
-
# Check next unban time:
- if self.__nextUnbanTime > time:
+ nextUnbanTime = self._nextUnbanTime
+ if nextUnbanTime > time:
return list()
# Gets the list of ticket to remove (thereby correct next unban time).
unBanList = {}
- self.__nextUnbanTime = BanTicket.MAX_TIME
+ nextUnbanTime = BanTicket.MAX_TIME
for fid,ticket in self.__banList.iteritems():
# current time greater as end of ban - timed out:
eob = ticket.getEndOfBanTime(self.__banTime)
if time > eob:
unBanList[fid] = ticket
- elif self.__nextUnbanTime > eob:
- self.__nextUnbanTime = eob
+ if len(unBanList) >= maxCount: # stop search cycle, so reset back the next check time
+ nextUnbanTime = self._nextUnbanTime
+ break
+ elif nextUnbanTime > eob:
+ nextUnbanTime = eob
+ self._nextUnbanTime = nextUnbanTime
# Removes tickets.
if len(unBanList):
if len(unBanList) / 2.0 <= len(self.__banList) / 3.0:
diff --git a/fail2ban/server/database.py b/fail2ban/server/database.py
index ac01382e..877cbb93 100644
--- a/fail2ban/server/database.py
+++ b/fail2ban/server/database.py
@@ -104,7 +104,11 @@ def commitandrollback(f):
def wrapper(self, *args, **kwargs):
with self._lock: # Threading lock
with self._db: # Auto commit and rollback on exception
- return f(self, self._db.cursor(), *args, **kwargs)
+ cur = self._db.cursor()
+ try:
+ return f(self, cur, *args, **kwargs)
+ finally:
+ cur.close()
return wrapper
@@ -191,7 +195,7 @@ class Fail2BanDb(object):
def __init__(self, filename, purgeAge=24*60*60, outDatedFactor=3):
- self.maxEntries = 50
+ self.maxMatches = 10
self._lock = RLock()
self._dbFilename = filename
self._purgeAge = purgeAge
@@ -253,7 +257,7 @@ class Fail2BanDb(object):
self.repairDB()
else:
version = cur.fetchone()[0]
- if version < Fail2BanDb.__version__:
+ if version != Fail2BanDb.__version__:
newversion = self.updateDb(version)
if newversion == Fail2BanDb.__version__:
logSys.warning( "Database updated from '%r' to '%r'",
@@ -301,9 +305,11 @@ class Fail2BanDb(object):
try:
# backup
logSys.info("Trying to repair database %s", self._dbFilename)
- shutil.move(self._dbFilename, self._dbBackupFilename)
- logSys.info(" Database backup created: %s", self._dbBackupFilename)
-
+ if not os.path.isfile(self._dbBackupFilename):
+ shutil.move(self._dbFilename, self._dbBackupFilename)
+ logSys.info(" Database backup created: %s", self._dbBackupFilename)
+ elif os.path.isfile(self._dbFilename):
+ os.remove(self._dbFilename)
# first try to repair using dump/restore in order
Utils.executeCmd((r"""f2b_db=$0; f2b_dbbk=$1; sqlite3 "$f2b_dbbk" ".dump" | sqlite3 "$f2b_db" """,
self._dbFilename, self._dbBackupFilename))
@@ -415,7 +421,7 @@ class Fail2BanDb(object):
logSys.error("Failed to upgrade database '%s': %s",
self._dbFilename, e.args[0],
exc_info=logSys.getEffectiveLevel() <= 10)
- raise
+ self.repairDB()
@commitandrollback
def addJail(self, cur, jail):
@@ -489,22 +495,24 @@ class Fail2BanDb(object):
If log was already present in database, value of last position
in the log file; else `None`
"""
+ return self._addLog(cur, jail, container.getFileName(), container.getPos(), container.getHash())
+
+ def _addLog(self, cur, jail, name, pos=0, md5=None):
lastLinePos = None
cur.execute(
"SELECT firstlinemd5, lastfilepos FROM logs "
"WHERE jail=? AND path=?",
- (jail.name, container.getFileName()))
+ (jail.name, name))
try:
firstLineMD5, lastLinePos = cur.fetchone()
except TypeError:
- firstLineMD5 = False
+ firstLineMD5 = None
- cur.execute(
- "INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
- "VALUES(?, ?, ?, ?)",
- (jail.name, container.getFileName(),
- container.getHash(), container.getPos()))
- if container.getHash() != firstLineMD5:
+ if firstLineMD5 is None and (pos or md5 is not None):
+ cur.execute(
+ "INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
+ "VALUES(?, ?, ?, ?)", (jail.name, name, md5, pos))
+ if md5 is not None and md5 != firstLineMD5:
lastLinePos = None
return lastLinePos
@@ -533,7 +541,7 @@ class Fail2BanDb(object):
return set(row[0] for row in cur.fetchmany())
@commitandrollback
- def updateLog(self, cur, *args, **kwargs):
+ def updateLog(self, cur, jail, container):
"""Updates hash and last position in log file.
Parameters
@@ -543,14 +551,48 @@ class Fail2BanDb(object):
container : FileContainer
File container of the log file being updated.
"""
- self._updateLog(cur, *args, **kwargs)
+ self._updateLog(cur, jail, container.getFileName(), container.getPos(), container.getHash())
- def _updateLog(self, cur, jail, container):
+ def _updateLog(self, cur, jail, name, pos, md5):
cur.execute(
"UPDATE logs SET firstlinemd5=?, lastfilepos=? "
- "WHERE jail=? AND path=?",
- (container.getHash(), container.getPos(),
- jail.name, container.getFileName()))
+ "WHERE jail=? AND path=?", (md5, pos, jail.name, name))
+ # be sure it is set (if not available):
+ if not cur.rowcount:
+ cur.execute(
+ "INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
+ "VALUES(?, ?, ?, ?)", (jail.name, name, md5, pos))
+
+ @commitandrollback
+ def getJournalPos(self, cur, jail, name, time=0, iso=None):
+ """Get journal position from database.
+
+ Parameters
+ ----------
+ jail : Jail
+ Jail of which the journal belongs to.
+ name, time, iso :
+ Journal name (typically systemd-journal) and last known time.
+
+ Returns
+ -------
+ int (or float)
+ Last position (as time) if it was already present in database; else `None`
+ """
+ return self._addLog(cur, jail, name, time, iso); # no hash, just time as iso
+
+ @commitandrollback
+ def updateJournal(self, cur, jail, name, time, iso):
+ """Updates last position (as time) of journal.
+
+ Parameters
+ ----------
+ jail : Jail
+ Jail of which the journal belongs to.
+ name, time, iso :
+ Journal name (typically systemd-journal) and last known time.
+ """
+ self._updateLog(cur, jail, name, time, iso); # no hash, just time as iso
@commitandrollback
def addBan(self, cur, jail, ticket):
@@ -563,7 +605,7 @@ class Fail2BanDb(object):
ticket : BanTicket
Ticket of the ban to be added.
"""
- ip = str(ticket.getIP())
+ ip = str(ticket.getID())
try:
del self._bansMergedCache[(ip, jail)]
except KeyError:
@@ -575,8 +617,13 @@ class Fail2BanDb(object):
#TODO: Implement data parts once arbitrary match keys completed
data = ticket.getData()
matches = data.get('matches')
- if matches and len(matches) > self.maxEntries:
- data['matches'] = matches[-self.maxEntries:]
+ if self.maxMatches:
+ if matches and len(matches) > self.maxMatches:
+ data = data.copy()
+ data['matches'] = matches[-self.maxMatches:]
+ elif matches:
+ data = data.copy()
+ del data['matches']
cur.execute(
"INSERT INTO bans(jail, ip, timeofban, bantime, bancount, data) VALUES(?, ?, ?, ?, ?, ?)",
(jail.name, ip, int(round(ticket.getTime())), ticket.getBanTime(jail.actions.getBanTime()), ticket.getBanCount(),
@@ -710,7 +757,7 @@ class Fail2BanDb(object):
tickdata = {}
m = data.get('matches', [])
# pre-insert "maxadd" enries (because tickets are ordered desc by time)
- maxadd = self.maxEntries - len(matches)
+ maxadd = self.maxMatches - len(matches)
if maxadd > 0:
if len(m) <= maxadd:
matches = m + matches
@@ -748,8 +795,8 @@ class Fail2BanDb(object):
queryArgs.append(fromtime)
if overalljails or jail is None:
query += " GROUP BY ip ORDER BY timeofban DESC LIMIT 1"
- cur = self._db.cursor()
- return cur.execute(query, queryArgs)
+ # repack iterator as long as in lock:
+ return list(cur.execute(query, queryArgs))
def _getCurrentBans(self, cur, jail = None, ip = None, forbantime=None, fromtime=None):
queryArgs = []
@@ -768,12 +815,12 @@ class Fail2BanDb(object):
queryArgs.append(fromtime - forbantime)
if ip is None:
query += " GROUP BY ip ORDER BY ip, timeofban DESC"
- cur = self._db.cursor()
+ else:
+ query += " ORDER BY timeofban DESC LIMIT 1"
return cur.execute(query, queryArgs)
- @commitandrollback
- def getCurrentBans(self, cur, jail=None, ip=None, forbantime=None, fromtime=None,
- correctBanTime=True
+ def getCurrentBans(self, jail=None, ip=None, forbantime=None, fromtime=None,
+ correctBanTime=True, maxmatches=None
):
"""Reads tickets (with merged info) currently affected from ban from the database.
@@ -784,49 +831,65 @@ class Fail2BanDb(object):
(and therefore endOfBan) of the ticket (normally it is ban-time of jail as maximum)
for all tickets with ban-time greater (or persistent).
"""
- if fromtime is None:
- fromtime = MyTime.time()
- tickets = []
- ticket = None
- if correctBanTime is True:
- correctBanTime = jail.getMaxBanTime() if jail is not None else None
- # don't change if persistent allowed:
- if correctBanTime == -1: correctBanTime = None
-
- for ticket in self._getCurrentBans(cur, jail=jail, ip=ip,
- forbantime=forbantime, fromtime=fromtime
- ):
- # can produce unpack error (database may return sporadical wrong-empty row):
- try:
- banip, timeofban, bantime, bancount, data = ticket
- # additionally check for empty values:
- if banip is None or banip == "": # pragma: no cover
- raise ValueError('unexpected value %r' % (banip,))
- # if bantime unknown (after upgrade-db from earlier version), just use min known ban-time:
- if bantime == -2: # todo: remove it in future version
- bantime = jail.actions.getBanTime() if jail is not None else (
- correctBanTime if correctBanTime else 600)
- elif correctBanTime and correctBanTime >= 0:
- # if persistent ban (or greater as max), use current max-bantime of the jail:
- if bantime == -1 or bantime > correctBanTime:
- bantime = correctBanTime
- # after correction check the end of ban again:
- if bantime != -1 and timeofban + bantime <= fromtime:
- # not persistent and too old - ignore it:
- logSys.debug("ignore ticket (with new max ban-time %r): too old %r <= %r, ticket: %r",
- bantime, timeofban + bantime, fromtime, ticket)
+ cur = self._db.cursor()
+ try:
+ if fromtime is None:
+ fromtime = MyTime.time()
+ tickets = []
+ ticket = None
+ if correctBanTime is True:
+ correctBanTime = jail.getMaxBanTime() if jail is not None else None
+ # don't change if persistent allowed:
+ if correctBanTime == -1: correctBanTime = None
+
+ with self._lock:
+ bans = self._getCurrentBans(cur, jail=jail, ip=ip,
+ forbantime=forbantime, fromtime=fromtime
+ )
+ for ticket in bans:
+ # can produce unpack error (database may return sporadical wrong-empty row):
+ try:
+ banip, timeofban, bantime, bancount, data = ticket
+ # additionally check for empty values:
+ if banip is None or banip == "": # pragma: no cover
+ raise ValueError('unexpected value %r' % (banip,))
+ # if bantime unknown (after upgrade-db from earlier version), just use min known ban-time:
+ if bantime == -2: # todo: remove it in future version
+ bantime = jail.actions.getBanTime() if jail is not None else (
+ correctBanTime if correctBanTime else 600)
+ elif correctBanTime and correctBanTime >= 0:
+ # if persistent ban (or greater as max), use current max-bantime of the jail:
+ if bantime == -1 or bantime > correctBanTime:
+ bantime = correctBanTime
+ # after correction check the end of ban again:
+ if bantime != -1 and timeofban + bantime <= fromtime:
+ # not persistent and too old - ignore it:
+ logSys.debug("ignore ticket (with new max ban-time %r): too old %r <= %r, ticket: %r",
+ bantime, timeofban + bantime, fromtime, ticket)
+ continue
+ except ValueError as e: # pragma: no cover
+ logSys.debug("get current bans: ignore row %r - %s", ticket, e)
continue
- except ValueError as e: # pragma: no cover
- logSys.debug("get current bans: ignore row %r - %s", ticket, e)
- continue
- # logSys.debug('restore ticket %r, %r, %r', banip, timeofban, data)
- ticket = FailTicket(banip, timeofban, data=data)
- # logSys.debug('restored ticket: %r', ticket)
- ticket.setBanTime(bantime)
- ticket.setBanCount(bancount)
- tickets.append(ticket)
-
- return tickets if ip is None else ticket
+ # logSys.debug('restore ticket %r, %r, %r', banip, timeofban, data)
+ ticket = FailTicket(banip, timeofban, data=data)
+ # filter matches if expected (current count > as maxmatches specified):
+ if maxmatches is None:
+ maxmatches = self.maxMatches
+ if maxmatches:
+ matches = ticket.getMatches()
+ if matches and len(matches) > maxmatches:
+ ticket.setMatches(matches[-maxmatches:])
+ else:
+ ticket.setMatches(None)
+ # logSys.debug('restored ticket: %r', ticket)
+ ticket.setBanTime(bantime)
+ ticket.setBanCount(bancount)
+ if ip is not None: return ticket
+ tickets.append(ticket)
+ finally:
+ cur.close()
+
+ return tickets
def _cleanjails(self, cur):
"""Remove empty jails jails and log files from database.
diff --git a/fail2ban/server/datedetector.py b/fail2ban/server/datedetector.py
index 2e85b940..b90e1b26 100644
--- a/fail2ban/server/datedetector.py
+++ b/fail2ban/server/datedetector.py
@@ -35,7 +35,7 @@ from ..helpers import getLogger
# Gets the instance of the logger.
logSys = getLogger(__name__)
-logLevel = 6
+logLevel = 5
RE_DATE_PREMATCH = re.compile(r"(?<!\\)\{DATE\}", re.IGNORECASE)
DD_patternCache = Utils.Cache(maxCount=1000, maxTime=60*60)
@@ -128,52 +128,52 @@ class DateDetectorCache(object):
# 2005-01-23T21:59:59.981746, 2005-01-23 21:59:59, 2005-01-23 8:59:59
# simple date: 2005/01/23 21:59:59
# custom for syslog-ng 2006.12.21 06:43:20
- "%ExY(?P<_sep>[-/.])%m(?P=_sep)%d(?:T| ?)%H:%M:%S(?:[.,]%f)?(?:\s*%z)?",
+ r"%ExY(?P<_sep>[-/.])%m(?P=_sep)%d(?:T| ?)%H:%M:%S(?:[.,]%f)?(?:\s*%z)?",
# asctime with optional day, subsecond and/or year:
# Sun Jan 23 21:59:59.011 2005
- "(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
+ r"(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
# asctime with optional day, subsecond and/or year coming after day
# http://bugs.debian.org/798923
# Sun Jan 23 2005 21:59:59.011
- "(?:%a )?%b %d %ExY %k:%M:%S(?:\.%f)?",
+ r"(?:%a )?%b %d %ExY %k:%M:%S(?:\.%f)?",
# simple date too (from x11vnc): 23/01/2005 21:59:59
# and with optional year given by 2 digits: 23/01/05 21:59:59
# (See http://bugs.debian.org/537610)
# 17-07-2008 17:23:25
- "%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S",
+ r"%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S",
# Apache format optional time zone:
# [31/Oct/2006:09:22:55 -0000]
# 26-Jul-2007 15:20:52
# named 26-Jul-2007 15:20:52.252
# roundcube 26-Jul-2007 15:20:52 +0200
- "%d(?P<_sep>[-/])%b(?P=_sep)%ExY[ :]?%H:%M:%S(?:\.%f)?(?: %z)?",
+ r"%d(?P<_sep>[-/])%b(?P=_sep)%ExY[ :]?%H:%M:%S(?:\.%f)?(?: %z)?",
# CPanel 05/20/2008:01:57:39
- "%m/%d/%ExY:%H:%M:%S",
+ r"%m/%d/%ExY:%H:%M:%S",
# 01-27-2012 16:22:44.252
# subseconds explicit to avoid possible %m<->%d confusion
# with previous ("%d-%m-%ExY %k:%M:%S" by "%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S")
- "%m-%d-%ExY %k:%M:%S(?:\.%f)?",
+ r"%m-%d-%ExY %k:%M:%S(?:\.%f)?",
# Epoch
- "EPOCH",
+ r"EPOCH",
# Only time information in the log
- "{^LN-BEG}%H:%M:%S",
+ r"{^LN-BEG}%H:%M:%S",
# <09/16/08@05:03:30>
- "^<%m/%d/%Exy@%H:%M:%S>",
+ r"^<%m/%d/%Exy@%H:%M:%S>",
# MySQL: 130322 11:46:11
- "%Exy%Exm%Exd ?%H:%M:%S",
+ r"%Exy%Exm%Exd ?%H:%M:%S",
# Apache Tomcat
- "%b %d, %ExY %I:%M:%S %p",
+ r"%b %d, %ExY %I:%M:%S %p",
# ASSP: Apr-27-13 02:33:06
- "^%b-%d-%Exy %k:%M:%S",
+ r"^%b-%d-%Exy %k:%M:%S",
# 20050123T215959, 20050123 215959, 20050123 85959
- "%ExY%Exm%Exd(?:T| ?)%ExH%ExM%ExS(?:[.,]%f)?(?:\s*%z)?",
+ r"%ExY%Exm%Exd(?:T| ?)%ExH%ExM%ExS(?:[.,]%f)?(?:\s*%z)?",
# prefixed with optional named time zone (monit):
# PDT Apr 16 21:05:29
- "(?:%Z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
+ r"(?:%Z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
# +00:00 Jan 23 21:59:59.011 2005
- "(?:%z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
+ r"(?:%z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
# TAI64N
- "TAI64N",
+ r"TAI64N",
]
@property
@@ -282,6 +282,8 @@ class DateDetector(object):
elif "{DATE}" in key:
self.addDefaultTemplate(preMatch=pattern, allDefaults=False)
return
+ elif key == "{NONE}":
+ template = _getPatternTemplate('{UNB}^', key)
else:
template = _getPatternTemplate(pattern, key)
@@ -337,65 +339,76 @@ class DateDetector(object):
# if no templates specified - default templates should be used:
if not len(self.__templates):
self.addDefaultTemplate()
- logSys.log(logLevel-1, "try to match time for line: %.120s", line)
- match = None
+ log = logSys.log if logSys.getEffectiveLevel() <= logLevel else lambda *args: None
+ log(logLevel-1, "try to match time for line: %.120s", line)
+
# first try to use last template with same start/end position:
+ match = None
+ found = None, 0x7fffffff, 0x7fffffff, -1
ignoreBySearch = 0x7fffffff
i = self.__lastTemplIdx
if i < len(self.__templates):
ddtempl = self.__templates[i]
template = ddtempl.template
if template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END):
- if logSys.getEffectiveLevel() <= logLevel-1: # pragma: no cover - very-heavy debug
- logSys.log(logLevel-1, " try to match last anchored template #%02i ...", i)
+ log(logLevel-1, " try to match last anchored template #%02i ...", i)
match = template.matchDate(line)
ignoreBySearch = i
else:
distance, endpos = self.__lastPos[0], self.__lastEndPos[0]
- if logSys.getEffectiveLevel() <= logLevel-1:
- logSys.log(logLevel-1, " try to match last template #%02i (from %r to %r): ...%r==%r %s %r==%r...",
- i, distance, endpos,
- line[distance-1:distance], self.__lastPos[1],
- line[distance:endpos],
- line[endpos:endpos+1], self.__lastEndPos[1])
- # check same boundaries left/right, otherwise possible collision/pattern switch:
- if (line[distance-1:distance] == self.__lastPos[1] and
- line[endpos:endpos+1] == self.__lastEndPos[1]
- ):
+ log(logLevel-1, " try to match last template #%02i (from %r to %r): ...%r==%r %s %r==%r...",
+ i, distance, endpos,
+ line[distance-1:distance], self.__lastPos[1],
+ line[distance:endpos],
+ line[endpos:endpos+1], self.__lastEndPos[2])
+ # check same boundaries left/right, outside fully equal, inside only if not alnum (e. g. bound RE
+ # with space or some special char), otherwise possible collision/pattern switch:
+ if ((
+ line[distance-1:distance] == self.__lastPos[1] or
+ (line[distance:distance+1] == self.__lastPos[2] and not self.__lastPos[2].isalnum())
+ ) and (
+ line[endpos:endpos+1] == self.__lastEndPos[2] or
+ (line[endpos-1:endpos] == self.__lastEndPos[1] and not self.__lastEndPos[1].isalnum())
+ )):
+ # search in line part only:
+ log(logLevel-1, " boundaries are correct, search in part %r", line[distance:endpos])
match = template.matchDate(line, distance, endpos)
+ else:
+ log(logLevel-1, " boundaries show conflict, try whole search")
+ match = template.matchDate(line)
+ ignoreBySearch = i
if match:
distance = match.start()
endpos = match.end()
# if different position, possible collision/pattern switch:
if (
+ len(self.__templates) == 1 or # single template:
template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END) or
(distance == self.__lastPos[0] and endpos == self.__lastEndPos[0])
):
- logSys.log(logLevel, " matched last time template #%02i", i)
+ log(logLevel, " matched last time template #%02i", i)
else:
- logSys.log(logLevel, " ** last pattern collision - pattern change, search ...")
+ log(logLevel, " ** last pattern collision - pattern change, reserve & search ...")
+ found = match, distance, endpos, i; # save current best alternative
match = None
else:
- logSys.log(logLevel, " ** last pattern not found - pattern change, search ...")
+ log(logLevel, " ** last pattern not found - pattern change, search ...")
# search template and better match:
if not match:
- logSys.log(logLevel, " search template (%i) ...", len(self.__templates))
- found = None, 0x7fffffff, 0x7fffffff, -1
+ log(logLevel, " search template (%i) ...", len(self.__templates))
i = 0
for ddtempl in self.__templates:
- if logSys.getEffectiveLevel() <= logLevel-1:
- logSys.log(logLevel-1, " try template #%02i: %s", i, ddtempl.name)
if i == ignoreBySearch:
i += 1
continue
+ log(logLevel-1, " try template #%02i: %s", i, ddtempl.name)
template = ddtempl.template
match = template.matchDate(line)
if match:
distance = match.start()
endpos = match.end()
- if logSys.getEffectiveLevel() <= logLevel:
- logSys.log(logLevel, " matched time template #%02i (at %r <= %r, %r) %s",
- i, distance, ddtempl.distance, self.__lastPos[0], template.name)
+ log(logLevel, " matched time template #%02i (at %r <= %r, %r) %s",
+ i, distance, ddtempl.distance, self.__lastPos[0], template.name)
## last (or single) template - fast stop:
if i+1 >= len(self.__templates):
break
@@ -408,7 +421,7 @@ class DateDetector(object):
## [grave] if distance changed, possible date-match was found somewhere
## in body of message, so save this template, and search further:
if distance > ddtempl.distance or distance > self.__lastPos[0]:
- logSys.log(logLevel, " ** distance collision - pattern change, reserve")
+ log(logLevel, " ** distance collision - pattern change, reserve")
## shortest of both:
if distance < found[1]:
found = match, distance, endpos, i
@@ -422,7 +435,7 @@ class DateDetector(object):
# check other template was found (use this one with shortest distance):
if not match and found[0]:
match, distance, endpos, i = found
- logSys.log(logLevel, " use best time template #%02i", i)
+ log(logLevel, " use best time template #%02i", i)
ddtempl = self.__templates[i]
template = ddtempl.template
# we've winner, incr hits, set distance, usage, reorder, etc:
@@ -432,8 +445,8 @@ class DateDetector(object):
ddtempl.distance = distance
if self.__firstUnused == i:
self.__firstUnused += 1
- self.__lastPos = distance, line[distance-1:distance]
- self.__lastEndPos = endpos, line[endpos:endpos+1]
+ self.__lastPos = distance, line[distance-1:distance], line[distance]
+ self.__lastEndPos = endpos, line[endpos-1], line[endpos:endpos+1]
# if not first - try to reorder current template (bubble up), they will be not sorted anymore:
if i and i != self.__lastTemplIdx:
i = self._reorderTemplate(i)
@@ -442,7 +455,7 @@ class DateDetector(object):
return (match, template)
# not found:
- logSys.log(logLevel, " no template.")
+ log(logLevel, " no template.")
return (None, None)
@property
diff --git a/fail2ban/server/datetemplate.py b/fail2ban/server/datetemplate.py
index e032c2b0..518805bb 100644
--- a/fail2ban/server/datetemplate.py
+++ b/fail2ban/server/datetemplate.py
@@ -35,16 +35,18 @@ logSys = getLogger(__name__)
# check already grouped contains "(", but ignores char "\(" and conditional "(?(id)...)":
RE_GROUPED = re.compile(r'(?<!(?:\(\?))(?<!\\)\((?!\?)')
RE_GROUP = ( re.compile(r'^((?:\(\?\w+\))?\^?(?:\(\?\w+\))?)(.*?)(\$?)$'), r"\1(\2)\3" )
+RE_GLOBALFLAGS = re.compile(r'((?:^|(?!<\\))\(\?[a-z]+\))')
+RE_EXLINE_NO_BOUNDS = re.compile(r'^\{UNB\}')
RE_EXLINE_BOUND_BEG = re.compile(r'^\{\^LN-BEG\}')
-RE_EXSANC_BOUND_BEG = re.compile(r'^\(\?:\^\|\\b\|\\W\)')
+RE_EXSANC_BOUND_BEG = re.compile(r'^\((?:\?:)?\^\|\\b\|\\W\)')
RE_EXEANC_BOUND_BEG = re.compile(r'\(\?=\\b\|\\W\|\$\)$')
-RE_NO_WRD_BOUND_BEG = re.compile(r'^\(*(?:\(\?\w+\))?(?:\^|\(*\*\*|\(\?:\^)')
+RE_NO_WRD_BOUND_BEG = re.compile(r'^\(*(?:\(\?\w+\))?(?:\^|\(*\*\*|\((?:\?:)?\^)')
RE_NO_WRD_BOUND_END = re.compile(r'(?<!\\)(?:\$\)?|\\b|\\s|\*\*\)*)$')
RE_DEL_WRD_BOUNDS = ( re.compile(r'^\(*(?:\(\?\w+\))?\(*\*\*|(?<!\\)\*\*\)*$'),
lambda m: m.group().replace('**', '') )
-RE_LINE_BOUND_BEG = re.compile(r'^(?:\(\?\w+\))?(?:\^|\(\?:\^(?!\|))')
+RE_LINE_BOUND_BEG = re.compile(r'^(?:\(\?\w+\))?(?:\^|\((?:\?:)?\^(?!\|))')
RE_LINE_BOUND_END = re.compile(r'(?<![\\\|])(?:\$\)?)$')
RE_ALPHA_PATTERN = re.compile(r'(?<!\%)\%[aAbBpc]')
@@ -82,7 +84,7 @@ class DateTemplate(object):
return self._regex
def setRegex(self, regex, wordBegin=True, wordEnd=True):
- """Sets regex to use for searching for date in log line.
+ r"""Sets regex to use for searching for date in log line.
Parameters
----------
@@ -109,6 +111,11 @@ class DateTemplate(object):
# because it may be very slow in negative case (by long log-lines not matching pattern)
regex = regex.strip()
+ # cut global flags like (?iu) from RE in order to pre-set it after processing:
+ gf = RE_GLOBALFLAGS.search(regex)
+ if gf:
+ regex = RE_GLOBALFLAGS.sub('', regex, count=1)
+ # check word boundaries needed:
boundBegin = wordBegin and not RE_NO_WRD_BOUND_BEG.search(regex)
boundEnd = wordEnd and not RE_NO_WRD_BOUND_END.search(regex)
# if no group add it now, should always have a group(1):
@@ -119,7 +126,7 @@ class DateTemplate(object):
if boundBegin:
self.flags |= DateTemplate.WORD_BEGIN if wordBegin != 'start' else DateTemplate.LINE_BEGIN
if wordBegin != 'start':
- regex = r'(?:^|\b|\W)' + regex
+ regex = r'(?=^|\b|\W)' + regex
else:
regex = r"^(?:\W{0,2})?" + regex
if not self.name.startswith('{^LN-BEG}'):
@@ -128,12 +135,16 @@ class DateTemplate(object):
if boundEnd:
self.flags |= DateTemplate.WORD_END
regex += r'(?=\b|\W|$)'
- if RE_LINE_BOUND_BEG.search(regex): self.flags |= DateTemplate.LINE_BEGIN
- if RE_LINE_BOUND_END.search(regex): self.flags |= DateTemplate.LINE_END
+ if not (self.flags & DateTemplate.LINE_BEGIN) and RE_LINE_BOUND_BEG.search(regex):
+ self.flags |= DateTemplate.LINE_BEGIN
+ if not (self.flags & DateTemplate.LINE_END) and RE_LINE_BOUND_END.search(regex):
+ self.flags |= DateTemplate.LINE_END
# remove possible special pattern "**" in front and end of regex:
regex = RE_DEL_WRD_BOUNDS[0].sub(RE_DEL_WRD_BOUNDS[1], regex)
+ if gf: # restore global flags:
+ regex = gf.group(1) + regex
self._regex = regex
- logSys.log(7, ' constructed regex %s', regex)
+ logSys.log(4, ' constructed regex %s', regex)
self._cRegex = None
regex = property(getRegex, setRegex, doc=
@@ -156,6 +167,7 @@ class DateTemplate(object):
"""
if not self._cRegex:
self._compileRegex()
+ logSys.log(4, " search %s", self.regex)
dateMatch = self._cRegex.search(line, *args); # pos, endpos
if dateMatch:
self.hits += 1
@@ -188,7 +200,7 @@ class DateTemplate(object):
def unboundPattern(pattern):
return RE_EXEANC_BOUND_BEG.sub('',
RE_EXSANC_BOUND_BEG.sub('',
- RE_EXLINE_BOUND_BEG.sub('', pattern)
+ RE_EXLINE_BOUND_BEG.sub('', RE_EXLINE_NO_BOUNDS.sub('', pattern))
)
)
@@ -215,8 +227,10 @@ class DateEpoch(DateTemplate):
self.name = "LongEpoch" if not pattern else pattern
epochRE = r"\d{10,11}(?:\d{3}(?:\.\d{1,6}|\d{3})?)?"
if pattern:
- # pattern should capture/cut out the whole match:
- regex = "(" + RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern) + ")"
+ # pattern should find the whole pattern, but cut out grouped match (or whole match if no groups specified):
+ regex = RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern)
+ if not RE_GROUPED.search(pattern):
+ regex = "(" + regex + ")"
self._grpIdx = 2
self.setRegex(regex)
elif not lineBeginOnly:
@@ -297,18 +311,25 @@ class DatePatternRegex(DateTemplate):
def setRegex(self, pattern, wordBegin=True, wordEnd=True):
# original pattern:
self._pattern = pattern
+ # if unbound signalled - reset boundaries left and right:
+ if RE_EXLINE_NO_BOUNDS.search(pattern):
+ pattern = RE_EXLINE_NO_BOUNDS.sub('', pattern)
+ wordBegin = wordEnd = False
# if explicit given {^LN-BEG} - remove it from pattern and set 'start' in wordBegin:
if wordBegin and RE_EXLINE_BOUND_BEG.search(pattern):
pattern = RE_EXLINE_BOUND_BEG.sub('', pattern)
wordBegin = 'start'
- # wrap to regex:
- fmt = self._patternRE.sub(r'%(\1)s', pattern)
- self.name = fmt % self._patternName
- regex = fmt % timeRE
- # if expected add (?iu) for "ignore case" and "unicode":
- if RE_ALPHA_PATTERN.search(pattern):
- regex = r'(?iu)' + regex
- super(DatePatternRegex, self).setRegex(regex, wordBegin, wordEnd)
+ try:
+ # wrap to regex:
+ fmt = self._patternRE.sub(r'%(\1)s', pattern)
+ self.name = fmt % self._patternName
+ regex = fmt % timeRE
+ # if expected add (?iu) for "ignore case" and "unicode":
+ if RE_ALPHA_PATTERN.search(pattern):
+ regex = r'(?iu)' + regex
+ super(DatePatternRegex, self).setRegex(regex, wordBegin, wordEnd)
+ except Exception as e:
+ raise TypeError("Failed to set datepattern '%s' (may be an invalid format or unescaped percent char): %s" % (pattern, e))
def getDate(self, line, dateMatch=None, default_tz=None):
"""Method to return the date for a log line.
diff --git a/fail2ban/server/failmanager.py b/fail2ban/server/failmanager.py
index 6ce9b74e..3c71d51a 100644
--- a/fail2ban/server/failmanager.py
+++ b/fail2ban/server/failmanager.py
@@ -43,26 +43,20 @@ class FailManager:
self.__maxRetry = 3
self.__maxTime = 600
self.__failTotal = 0
- self.maxEntries = 50
+ self.maxMatches = 5
self.__bgSvc = BgService()
def setFailTotal(self, value):
- with self.__lock:
- self.__failTotal = value
+ self.__failTotal = value
def getFailTotal(self):
- with self.__lock:
- return self.__failTotal
+ return self.__failTotal
def getFailCount(self):
# may be slow on large list of failures, should be used for test purposes only...
with self.__lock:
return len(self.__failList), sum([f.getRetry() for f in self.__failList.values()])
- def getFailTotal(self):
- with self.__lock:
- return self.__failTotal
-
def setMaxRetry(self, value):
self.__maxRetry = value
@@ -87,24 +81,24 @@ class FailManager:
attempt = 1
else:
# will be incremented / extended (be sure we have at least +1 attempt):
- matches = ticket.getMatches()
+ matches = ticket.getMatches() if self.maxMatches else None
attempt = ticket.getAttempt()
if attempt <= 0:
attempt += 1
unixTime = ticket.getTime()
- fData.setLastTime(unixTime)
- if fData.getLastReset() < unixTime - self.__maxTime:
- fData.setLastReset(unixTime)
- fData.setRetry(0)
+ fData.adjustTime(unixTime, self.__maxTime)
fData.inc(matches, attempt, count)
- # truncate to maxEntries:
- matches = fData.getMatches()
- if len(matches) > self.maxEntries:
- fData.setMatches(matches[-self.maxEntries:])
+ # truncate to maxMatches:
+ if self.maxMatches:
+ matches = fData.getMatches()
+ if len(matches) > self.maxMatches:
+ fData.setMatches(matches[-self.maxMatches:])
+ else:
+ fData.setMatches(None)
except KeyError:
# not found - already banned - prevent to add failure if comes from observer:
if observed or isinstance(ticket, BanTicket):
- return
+ return ticket.getRetry()
# if already FailTicket - add it direct, otherwise create (using copy all ticket data):
if isinstance(ticket, FailTicket):
fData = ticket;
@@ -130,13 +124,13 @@ class FailManager:
return attempts
def size(self):
- with self.__lock:
- return len(self.__failList)
+ return len(self.__failList)
def cleanup(self, time):
+ time -= self.__maxTime
with self.__lock:
todelete = [fid for fid,item in self.__failList.iteritems() \
- if item.getLastTime() + self.__maxTime <= time]
+ if item.getTime() <= time]
if len(todelete) == len(self.__failList):
# remove all:
self.__failList = dict()
@@ -150,7 +144,7 @@ class FailManager:
else:
# create new dictionary without items to be deleted:
self.__failList = dict((fid,item) for fid,item in self.__failList.iteritems() \
- if item.getLastTime() + self.__maxTime > time)
+ if item.getTime() > time)
self.__bgSvc.service()
def delFailure(self, fid):
@@ -162,7 +156,7 @@ class FailManager:
def toBan(self, fid=None):
with self.__lock:
- for fid in ([fid] if fid != None and fid in self.__failList else self.__failList):
+ for fid in ([fid] if fid is not None and fid in self.__failList else self.__failList):
data = self.__failList[fid]
if data.getRetry() >= self.__maxRetry:
del self.__failList[fid]
diff --git a/fail2ban/server/failregex.py b/fail2ban/server/failregex.py
index 672bc32a..a9b144af 100644
--- a/fail2ban/server/failregex.py
+++ b/fail2ban/server/failregex.py
@@ -37,25 +37,28 @@ R_HOST = [
r"""(?:::f{4,6}:)?(?P<ip4>%s)""" % (IPAddr.IP_4_RE,),
# separated ipv6:
r"""(?P<ip6>%s)""" % (IPAddr.IP_6_RE,),
- # place-holder for ipv6 enclosed in optional [] (used in addr-, host-regex)
- "",
# separated dns:
r"""(?P<dns>[\w\-.^_]*\w)""",
# place-holder for ADDR tag-replacement (joined):
"",
# place-holder for HOST tag replacement (joined):
- ""
+ "",
+ # CIDR in simplest integer form:
+ r"(?P<cidr>\d+)",
+ # place-holder for SUBNET tag-replacement
+ "",
]
RI_IPV4 = 0
RI_IPV6 = 1
-RI_IPV6BR = 2
-RI_DNS = 3
-RI_ADDR = 4
-RI_HOST = 5
+RI_DNS = 2
+RI_ADDR = 3
+RI_HOST = 4
+RI_CIDR = 5
+RI_SUBNET = 6
-R_HOST[RI_IPV6BR] = r"""\[?%s\]?""" % (R_HOST[RI_IPV6],)
-R_HOST[RI_ADDR] = "(?:%s)" % ("|".join((R_HOST[RI_IPV4], R_HOST[RI_IPV6BR])),)
-R_HOST[RI_HOST] = "(?:%s)" % ("|".join((R_HOST[RI_IPV4], R_HOST[RI_IPV6BR], R_HOST[RI_DNS])),)
+R_HOST[RI_ADDR] = r"\[?(?:%s|%s)\]?" % (R_HOST[RI_IPV4], R_HOST[RI_IPV6],)
+R_HOST[RI_HOST] = r"(?:%s|%s)" % (R_HOST[RI_ADDR], R_HOST[RI_DNS],)
+R_HOST[RI_SUBNET] = r"\[?(?:%s|%s)(?:/%s)?\]?" % (R_HOST[RI_IPV4], R_HOST[RI_IPV6], R_HOST[RI_CIDR],)
RH4TAG = {
# separated ipv4 (self closed, closed):
@@ -68,6 +71,11 @@ RH4TAG = {
# for separate usage of 2 address groups only (regardless of `usedns`), `ip4` and `ip6` together
"ADDR": R_HOST[RI_ADDR],
"F-ADDR/": R_HOST[RI_ADDR],
+ # subnet tags for usage as `<ADDR>/<CIDR>` or `<SUBNET>`:
+ "CIDR": R_HOST[RI_CIDR],
+ "F-CIDR/": R_HOST[RI_CIDR],
+ "SUBNET": R_HOST[RI_SUBNET],
+ "F-SUBNET/":R_HOST[RI_SUBNET],
# separated dns (self closed, closed):
"DNS": R_HOST[RI_DNS],
"F-DNS/": R_HOST[RI_DNS],
@@ -79,20 +87,31 @@ RH4TAG = {
# default failure groups map for customizable expressions (with different group-id):
R_MAP = {
- "ID": "fid",
- "PORT": "fport",
+ "id": "fid",
+ "port": "fport",
}
+# map global flags like ((?i)xxx) or (?:(?i)xxx) to local flags (?i:xxx) if supported by RE-engine in this python version:
+try:
+ re.search("^re(?i:val)$", "reVAL")
+ R_GLOB2LOCFLAGS = ( re.compile(r"(?<!\\)\((?:\?:)?(\(\?[a-z]+)\)"), r"\1:" )
+except:
+ R_GLOB2LOCFLAGS = ()
+
def mapTag2Opt(tag):
- try: # if should be mapped:
- return R_MAP[tag]
- except KeyError:
- return tag.lower()
+ tag = tag.lower()
+ return R_MAP.get(tag, tag)
-# alternate names to be merged, e. g. alt_user_1 -> user ...
+# complex names:
+# ALT_ - alternate names to be merged, e. g. alt_user_1 -> user ...
ALTNAME_PRE = 'alt_'
-ALTNAME_CRE = re.compile(r'^' + ALTNAME_PRE + r'(.*)(?:_\d+)?$')
+# TUPLE_ - names of parts to be combined to single value as tuple
+TUPNAME_PRE = 'tuple_'
+
+COMPLNAME_PRE = (ALTNAME_PRE, TUPNAME_PRE)
+COMPLNAME_CRE = re.compile(r'^(' + '|'.join(COMPLNAME_PRE) + r')(.*?)(?:_\d+)?$')
+
##
# Regular expression class.
@@ -116,20 +135,33 @@ class Regex:
#
if regex.lstrip() == '':
raise RegexException("Cannot add empty regex")
+ # special handling wrapping global flags to local flags:
+ if R_GLOB2LOCFLAGS:
+ regex = R_GLOB2LOCFLAGS[0].sub(R_GLOB2LOCFLAGS[1], regex)
try:
self._regexObj = re.compile(regex, re.MULTILINE if multiline else 0)
self._regex = regex
- self._altValues = {}
+ self._altValues = []
+ self._tupleValues = []
for k in filter(
- lambda k: len(k) > len(ALTNAME_PRE) and k.startswith(ALTNAME_PRE),
- self._regexObj.groupindex
+ lambda k: len(k) > len(COMPLNAME_PRE[0]), self._regexObj.groupindex
):
- n = ALTNAME_CRE.match(k).group(1)
- self._altValues[k] = n
- self._altValues = list(self._altValues.items()) if len(self._altValues) else None
- except sre_constants.error:
- raise RegexException("Unable to compile regular expression '%s'" %
- regex)
+ n = COMPLNAME_CRE.match(k)
+ if n:
+ g, n = n.group(1), mapTag2Opt(n.group(2))
+ if g == ALTNAME_PRE:
+ self._altValues.append((k,n))
+ else:
+ self._tupleValues.append((k,n))
+ self._altValues.sort()
+ self._tupleValues.sort()
+ self._altValues = self._altValues if len(self._altValues) else None
+ self._tupleValues = self._tupleValues if len(self._tupleValues) else None
+ except sre_constants.error as e:
+ raise RegexException("Unable to compile regular expression '%s':\n%s" %
+ (regex, e))
+ # set fetch handler depending on presence of alternate (or tuple) tags:
+ self.getGroups = self._getGroupsWithAlt if (self._altValues or self._tupleValues) else self._getGroups
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self._regex)
@@ -269,18 +301,33 @@ class Regex:
# Returns all matched groups.
#
- def getGroups(self):
- if not self._altValues:
- return self._matchCache.groupdict()
- # merge alternate values (e. g. 'alt_user_1' -> 'user' or 'alt_host' -> 'host'):
+ def _getGroups(self):
+ return self._matchCache.groupdict()
+
+ def _getGroupsWithAlt(self):
fail = self._matchCache.groupdict()
#fail = fail.copy()
- for k,n in self._altValues:
- v = fail.get(k)
- if v and not fail.get(n):
- fail[n] = v
+ # merge alternate values (e. g. 'alt_user_1' -> 'user' or 'alt_host' -> 'host'):
+ if self._altValues:
+ for k,n in self._altValues:
+ v = fail.get(k)
+ if v and not fail.get(n):
+ fail[n] = v
+ # combine tuple values (e. g. 'id', 'tuple_id' ... 'tuple_id_N' -> 'id'):
+ if self._tupleValues:
+ for k,n in self._tupleValues:
+ v = fail.get(k)
+ t = fail.get(n)
+ if isinstance(t, tuple):
+ t += (v,)
+ else:
+ t = (t,v,)
+ fail[n] = t
return fail
+ def getGroups(self): # pragma: no cover - abstract function (replaced in __init__)
+ pass
+
##
# Returns skipped lines.
#
@@ -416,3 +463,7 @@ class FailRegex(Regex):
def getHost(self):
return self.getFailID(("ip4", "ip6", "dns"))
+
+ def getIP(self):
+ fail = self.getGroups()
+ return IPAddr(self.getFailID(("ip4", "ip6")), int(fail.get("cidr") or IPAddr.CIDR_UNSPEC))
diff --git a/fail2ban/server/filter.py b/fail2ban/server/filter.py
index 945ee9dd..68968284 100644
--- a/fail2ban/server/filter.py
+++ b/fail2ban/server/filter.py
@@ -81,6 +81,7 @@ class Filter(JailThread):
## Ignore own IPs flag:
self.__ignoreSelf = True
## The ignore IP list.
+ self.__ignoreIpSet = set()
self.__ignoreIpList = []
## External command
self.__ignoreCommand = False
@@ -93,6 +94,8 @@ class Filter(JailThread):
## Store last time stamp, applicable for multi-line
self.__lastTimeText = ""
self.__lastDate = None
+ ## Next service (cleanup) time
+ self.__nextSvcTime = -(1<<63)
## if set, treat log lines without explicit time zone to be in this time zone
self.__logtimezone = None
## Default or preferred encoding (to decode bytes from file or journal):
@@ -102,14 +105,28 @@ class Filter(JailThread):
## Error counter (protected, so can be used in filter implementations)
## if it reached 100 (at once), run-cycle will go idle
self._errors = 0
+ ## Next time to update log or journal position in database:
+ self._nextUpdateTM = 0
+ ## Pending updates (must be executed at next update time or during stop):
+ self._pendDBUpdates = {}
## return raw host (host is not dns):
self.returnRawHost = False
## check each regex (used for test purposes):
self.checkAllRegex = False
+ ## avoid finding of pending failures (without ID/IP, used in fail2ban-regex):
+ self.ignorePending = True
+ ## callback called on ignoreregex match :
+ self.onIgnoreRegex = None
## if true ignores obsolete failures (failure time < now - findTime):
self.checkFindTime = True
+ ## shows that filter is in operation mode (processing new messages):
+ self.inOperation = True
## Ticks counter
self.ticks = 0
+ ## Processed lines counter
+ self.procLines = 0
+ ## Thread name:
+ self.name="f2b/f."+self.jailName
self.dateDetector = DateDetector()
logSys.debug("Created %s", self)
@@ -167,7 +184,7 @@ class Filter(JailThread):
# @param value the regular expression
def addFailRegex(self, value):
- multiLine = self.getMaxLines() > 1
+ multiLine = self.__lineBufferSize > 1
try:
regex = FailRegex(value, prefRegex=self.__prefRegex, multiline=multiLine,
useDns=self.__useDns)
@@ -428,31 +445,45 @@ class Filter(JailThread):
)
else:
self.__ignoreCache = None
- ##
- # Ban an IP - http://blogs.buanzo.com.ar/2009/04/fail2ban-patch-ban-ip-address-manually.html
- # Arturo 'Buanzo' Busleiman <buanzo@buanzo.com.ar>
- #
- # to enable banip fail2ban-client BAN command
- def addBannedIP(self, ip):
+ def performBan(self, ip=None):
+ """Performs a ban for IPs (or given ip) that are reached maxretry of the jail."""
+ while True:
+ try:
+ ticket = self.failManager.toBan(ip)
+ except FailManagerEmpty:
+ break
+ self.jail.putFailTicket(ticket)
+ if ip: break
+ self.performSvc()
+
+ def performSvc(self, force=False):
+ """Performs a service tasks (clean failure list)."""
+ tm = MyTime.time()
+ # avoid too early clean up:
+ if force or tm >= self.__nextSvcTime:
+ self.__nextSvcTime = tm + 5
+ # clean up failure list:
+ self.failManager.cleanup(tm)
+
+ def addAttempt(self, ip, *matches):
+ """Generate a failed attempt for ip"""
if not isinstance(ip, IPAddr):
ip = IPAddr(ip)
+ matches = list(matches) # tuple to list
+ # Generate the failure attempt for the IP:
unixTime = MyTime.time()
- ticket = FailTicket(ip, unixTime)
- if self._inIgnoreIPList(ip, ticket, log_ignore=False):
- logSys.warning('Requested to manually ban an ignored IP %s. User knows best. Proceeding to ban it.', ip)
- self.failManager.addFailure(ticket, self.failManager.getMaxRetry())
+ ticket = FailTicket(ip, unixTime, matches=matches)
+ logSys.info(
+ "[%s] Attempt %s - %s", self.jailName, ip, datetime.datetime.fromtimestamp(unixTime).strftime("%Y-%m-%d %H:%M:%S")
+ )
+ attempts = self.failManager.addFailure(ticket, len(matches) or 1)
+ # Perform the ban if this attempt is resulted to:
+ if attempts >= self.failManager.getMaxRetry():
+ self.performBan(ip)
- # Perform the banning of the IP now.
- try: # pragma: no branch - exception is the only way out
- while True:
- ticket = self.failManager.toBan(ip)
- self.jail.putFailTicket(ticket)
- except FailManagerEmpty:
- self.failManager.cleanup(MyTime.time())
-
- return ip
+ return 1
##
# Ignore own IP/DNS.
@@ -479,28 +510,36 @@ class Filter(JailThread):
# Create IP address object
ip = IPAddr(ipstr)
# Avoid exact duplicates
- if ip in self.__ignoreIpList:
- logSys.warn(" Ignore duplicate %r (%r), already in ignore list", ip, ipstr)
+ if ip in self.__ignoreIpSet or ip in self.__ignoreIpList:
+ logSys.log(logging.MSG, " Ignore duplicate %r (%r), already in ignore list", ip, ipstr)
return
# log and append to ignore list
logSys.debug(" Add %r to ignore list (%r)", ip, ipstr)
- self.__ignoreIpList.append(ip)
+ # if single IP (not DNS or a subnet) add to set, otherwise to list:
+ if ip.isSingle:
+ self.__ignoreIpSet.add(ip)
+ else:
+ self.__ignoreIpList.append(ip)
def delIgnoreIP(self, ip=None):
# clear all:
if ip is None:
+ self.__ignoreIpSet.clear()
del self.__ignoreIpList[:]
return
# delete by ip:
logSys.debug(" Remove %r from ignore list", ip)
- self.__ignoreIpList.remove(ip)
+ if ip in self.__ignoreIpSet:
+ self.__ignoreIpSet.remove(ip)
+ else:
+ self.__ignoreIpList.remove(ip)
def logIgnoreIp(self, ip, log_ignore, ignore_source="unknown source"):
if log_ignore:
logSys.info("[%s] Ignore %s by %s", self.jailName, ip, ignore_source)
def getIgnoreIP(self):
- return self.__ignoreIpList
+ return self.__ignoreIpList + list(self.__ignoreIpSet)
##
# Check if IP address/DNS is in the ignore list.
@@ -514,7 +553,7 @@ class Filter(JailThread):
ticket = None
if isinstance(ip, FailTicket):
ticket = ip
- ip = ticket.getIP()
+ ip = ticket.getID()
elif not isinstance(ip, IPAddr):
ip = IPAddr(ip)
return self._inIgnoreIPList(ip, ticket, log_ignore)
@@ -540,8 +579,11 @@ class Filter(JailThread):
if self.__ignoreCache: c.set(key, True)
return True
+ # check if the IP is covered by ignore IP (in set or in subnet/dns):
+ if ip in self.__ignoreIpSet:
+ self.logIgnoreIp(ip, log_ignore, ignore_source="ip")
+ return True
for net in self.__ignoreIpList:
- # check if the IP is covered by ignore IP
if ip.isInNet(net):
self.logIgnoreIp(ip, log_ignore, ignore_source=("ip" if net.isValid else "dns"))
if self.__ignoreCache: c.set(key, True)
@@ -564,50 +606,126 @@ class Filter(JailThread):
if self.__ignoreCache: c.set(key, False)
return False
+ def _logWarnOnce(self, nextLTM, *args):
+ """Log some issue as warning once per day, otherwise level 7"""
+ if MyTime.time() < getattr(self, nextLTM, 0):
+ if logSys.getEffectiveLevel() <= 7: logSys.log(7, *(args[0]))
+ else:
+ setattr(self, nextLTM, MyTime.time() + 24*60*60)
+ for args in args:
+ logSys.warning('[%s] ' + args[0], self.jailName, *args[1:])
+
def processLine(self, line, date=None):
"""Split the time portion from log msg and return findFailures on them
"""
+ logSys.log(7, "Working on line %r", line)
+
+ noDate = False
if date:
tupleLine = line
+ line = "".join(line)
+ self.__lastTimeText = tupleLine[1]
+ self.__lastDate = date
else:
- l = line.rstrip('\r\n')
- logSys.log(7, "Working on line %r", line)
-
- (timeMatch, template) = self.dateDetector.matchTime(l)
- if timeMatch:
- tupleLine = (
- l[:timeMatch.start(1)],
- l[timeMatch.start(1):timeMatch.end(1)],
- l[timeMatch.end(1):],
- (timeMatch, template)
- )
+ # try to parse date:
+ timeMatch = self.dateDetector.matchTime(line)
+ m = timeMatch[0]
+ if m:
+ s = m.start(1)
+ e = m.end(1)
+ m = line[s:e]
+ tupleLine = (line[:s], m, line[e:])
+ if m: # found and not empty - retrive date:
+ date = self.dateDetector.getTime(m, timeMatch)
+ if date is not None:
+ # Lets get the time part
+ date = date[0]
+ self.__lastTimeText = m
+ self.__lastDate = date
+ else:
+ logSys.error("findFailure failed to parse timeText: %s", m)
+ # matched empty value - date is optional or not available - set it to last known or now:
+ elif self.__lastDate and self.__lastDate > MyTime.time() - 60:
+ # set it to last known:
+ tupleLine = ("", self.__lastTimeText, line)
+ date = self.__lastDate
+ else:
+ # set it to now:
+ date = MyTime.time()
+ else:
+ tupleLine = ("", "", line)
+ # still no date - try to use last known:
+ if date is None:
+ noDate = True
+ if self.__lastDate and self.__lastDate > MyTime.time() - 60:
+ tupleLine = ("", self.__lastTimeText, line)
+ date = self.__lastDate
+ elif self.checkFindTime and self.inOperation:
+ date = MyTime.time()
+
+ if self.checkFindTime and date is not None:
+ # if in operation (modifications have been really found):
+ if self.inOperation:
+ # if weird date - we'd simulate now for timeing issue (too large deviation from now):
+ delta = int(date - MyTime.time())
+ if abs(delta) > 60:
+ # log timing issue as warning once per day:
+ self._logWarnOnce("_next_simByTimeWarn",
+ ("Detected a log entry %s %s the current time in operation mode. "
+ "This looks like a %s problem. Treating such entries as if they just happened.",
+ MyTime.seconds2str(abs(delta)), "before" if delta < 0 else "after",
+ "latency" if -3300 <= delta < 0 else "timezone"
+ ),
+ ("Please check a jail for a timing issue. Line with odd timestamp: %s",
+ line))
+ # simulate now as date:
+ date = MyTime.time()
+ self.__lastDate = date
else:
- tupleLine = (l, "", "", None)
+ # in initialization (restore) phase, if too old - ignore:
+ if date < MyTime.time() - self.getFindTime():
+ # log time zone issue as warning once per day:
+ self._logWarnOnce("_next_ignByTimeWarn",
+ ("Ignoring all log entries older than %ss; these are probably" +
+ " messages generated while fail2ban was not running.",
+ self.getFindTime()),
+ ("Please check a jail for a timing issue. Line with odd timestamp: %s",
+ line))
+ # ignore - too old (obsolete) entry:
+ return []
# save last line (lazy convert of process line tuple to string on demand):
self.processedLine = lambda: "".join(tupleLine[::2])
- return self.findFailure(tupleLine, date)
+ return self.findFailure(tupleLine, date, noDate=noDate)
def processLineAndAdd(self, line, date=None):
"""Processes the line for failures and populates failManager
"""
try:
- for element in self.processLine(line, date):
- ip = element[1]
- unixTime = element[2]
- fail = element[3]
+ for (_, ip, unixTime, fail) in self.processLine(line, date):
logSys.debug("Processing line with time:%s and ip:%s",
unixTime, ip)
+ # ensure the time is not in the future, e. g. by some estimated (assumed) time:
+ if self.checkFindTime and unixTime > MyTime.time():
+ unixTime = MyTime.time()
tick = FailTicket(ip, unixTime, data=fail)
if self._inIgnoreIPList(ip, tick):
continue
logSys.info(
- "[%s] Found %s - %s", self.jailName, ip, datetime.datetime.fromtimestamp(unixTime).strftime("%Y-%m-%d %H:%M:%S")
+ "[%s] Found %s - %s", self.jailName, ip, MyTime.time2str(unixTime)
)
- self.failManager.addFailure(tick)
+ attempts = self.failManager.addFailure(tick)
+ # avoid RC on busy filter (too many failures) - if attempts for IP/ID reached maxretry,
+ # we can speedup ban, so do it as soon as possible:
+ if attempts >= self.failManager.getMaxRetry():
+ self.performBan(ip)
# report to observer - failure was found, for possibly increasing of it retry counter (asynchronous)
if Observers.Main is not None:
- Observers.Main.add('failureFound', self.failManager, self.jail, tick)
+ Observers.Main.add('failureFound', self.jail, tick)
+ self.procLines += 1
+ # every 100 lines check need to perform service tasks:
+ if self.procLines % 100 == 0:
+ self.performSvc()
# reset (halve) error counter (successfully processed line):
if self._errors:
self._errors //= 2
@@ -617,7 +735,7 @@ class Filter(JailThread):
# incr common error counter:
self.commonError()
- def commonError(self):
+ def commonError(self, reason="common", exc=None):
# incr error counter, stop processing (going idle) after 100th error :
self._errors += 1
# sleep a little bit (to get around time-related errors):
@@ -627,20 +745,26 @@ class Filter(JailThread):
self._errors //= 2
self.idle = True
- ##
- # Returns true if the line should be ignored.
- #
- # Uses ignoreregex.
- # @param line: the line
- # @return: a boolean
-
- def ignoreLine(self, tupleLines):
- buf = Regex._tupleLinesBuf(tupleLines)
+ def _ignoreLine(self, buf, orgBuffer, failRegex=None):
+ # if multi-line buffer - use matched only, otherwise (single line) - original buf:
+ if failRegex and self.__lineBufferSize > 1:
+ orgBuffer = failRegex.getMatchedTupleLines()
+ buf = Regex._tupleLinesBuf(orgBuffer)
+ # search ignored:
+ fnd = None
for ignoreRegexIndex, ignoreRegex in enumerate(self.__ignoreRegex):
- ignoreRegex.search(buf, tupleLines)
+ ignoreRegex.search(buf, orgBuffer)
if ignoreRegex.hasMatched():
- return ignoreRegexIndex
- return None
+ fnd = ignoreRegexIndex
+ logSys.log(7, " Matched ignoreregex %d and was ignored", fnd)
+ if self.onIgnoreRegex: self.onIgnoreRegex(fnd, ignoreRegex)
+ # remove ignored match:
+ if not self.checkAllRegex or self.__lineBufferSize > 1:
+ # todo: check ignoreRegex.getUnmatchedTupleLines() would be better (fix testGetFailuresMultiLineIgnoreRegex):
+ if failRegex:
+ self.__lineBuffer = failRegex.getUnmatchedTupleLines()
+ if not self.checkAllRegex: break
+ return fnd
def _updateUsers(self, fail, user=()):
users = fail.get('users')
@@ -650,49 +774,33 @@ class Filter(JailThread):
fail['users'] = users = set()
users.add(user)
return users
- return None
-
- # # ATM incremental (non-empty only) merge deactivated ...
- # @staticmethod
- # def _updateFailure(self, mlfidGroups, fail):
- # # reset old failure-ids when new types of id available in this failure:
- # fids = set()
- # for k in ('fid', 'ip4', 'ip6', 'dns'):
- # if fail.get(k):
- # fids.add(k)
- # if fids:
- # for k in ('fid', 'ip4', 'ip6', 'dns'):
- # if k not in fids:
- # try:
- # del mlfidGroups[k]
- # except:
- # pass
- # # update not empty values:
- # mlfidGroups.update(((k,v) for k,v in fail.iteritems() if v))
+ return users
def _mergeFailure(self, mlfid, fail, failRegex):
mlfidFail = self.mlfidCache.get(mlfid) if self.__mlfidCache else None
users = None
nfflgs = 0
- if fail.get('nofail'): nfflgs |= 1
- if fail.get('mlfforget'): nfflgs |= 2
+ if fail.get("mlfgained"):
+ nfflgs |= (8|1)
+ if not fail.get('nofail'):
+ fail['nofail'] = fail["mlfgained"]
+ elif fail.get('nofail'): nfflgs |= 1
+ if fail.pop('mlfforget', None): nfflgs |= 2
# if multi-line failure id (connection id) known:
if mlfidFail:
mlfidGroups = mlfidFail[1]
# update users set (hold all users of connect):
users = self._updateUsers(mlfidGroups, fail.get('user'))
- # be sure we've correct current state ('nofail' only from last failure)
- try:
- del mlfidGroups['nofail']
- except KeyError:
- pass
- # # ATM incremental (non-empty only) merge deactivated (for future version only),
- # # it can be simulated using alternate value tags, like <F-ALT_VAL>...</F-ALT_VAL>,
- # # so previous value 'val' will be overwritten only if 'alt_val' is not empty...
- # _updateFailure(mlfidGroups, fail)
- #
+ # be sure we've correct current state ('nofail' and 'mlfgained' only from last failure)
+ if mlfidGroups.pop('nofail', None): nfflgs |= 4
+ if mlfidGroups.pop('mlfgained', None): nfflgs |= 4
+ # gained resets all pending failures (retaining users to check it later)
+ if nfflgs & 8: mlfidGroups.pop('mlfpending', None)
+ # if we had no pending failures then clear the matches (they are already provided):
+ if (nfflgs & 4) == 0 and not mlfidGroups.get('mlfpending', 0):
+ mlfidGroups.pop("matches", None)
# overwrite multi-line failure with all values, available in fail:
- mlfidGroups.update(fail)
+ mlfidGroups.update(((k,v) for k,v in fail.iteritems() if v is not None))
# new merged failure data:
fail = mlfidGroups
# if forget (disconnect/reset) - remove cached entry:
@@ -703,23 +811,19 @@ class Filter(JailThread):
mlfidFail = [self.__lastDate, fail]
self.mlfidCache.set(mlfid, mlfidFail)
# check users in order to avoid reset failure by multiple logon-attempts:
- if users and len(users) > 1:
- # we've new user, reset 'nofail' because of multiple users attempts:
- try:
- del fail['nofail']
- except KeyError:
- pass
+ if fail.pop('mlfpending', 0) or users and len(users) > 1:
+ # we've pending failures or new user, reset 'nofail' because of failures or multiple users attempts:
+ fail.pop('nofail', None)
+ fail.pop('mlfgained', None)
+ nfflgs &= ~(8|1) # reset nofail and gained
# merge matches:
- if not fail.get('nofail'): # current state (corresponding users)
- try:
- m = fail.pop("nofail-matches")
- m += fail.get("matches", [])
- except KeyError:
- m = fail.get("matches", [])
- if not (nfflgs & 2): # not mlfforget:
+ if (nfflgs & 1) == 0: # current nofail state (corresponding users)
+ m = fail.pop("nofail-matches", [])
+ m += fail.get("matches", [])
+ if (nfflgs & 8) == 0: # no gain signaled
m += failRegex.getMatchedTupleLines()
fail["matches"] = m
- elif not (nfflgs & 2) and (nfflgs & 1): # not mlfforget and nofail:
+ elif (nfflgs & 3) == 1: # not mlfforget and nofail:
fail["nofail-matches"] = fail.get("nofail-matches", []) + failRegex.getMatchedTupleLines()
# return merged:
return fail
@@ -732,72 +836,41 @@ class Filter(JailThread):
# to find the logging time.
# @return a dict with IP and timestamp.
- def findFailure(self, tupleLine, date=None):
+ def findFailure(self, tupleLine, date, noDate=False):
failList = list()
ll = logSys.getEffectiveLevel()
- returnRawHost = self.returnRawHost
- cidr = IPAddr.CIDR_UNSPEC
- if self.__useDns == "raw":
- returnRawHost = True
- cidr = IPAddr.CIDR_RAW
-
- # Checks if we mut ignore this line.
- if self.ignoreLine([tupleLine[::2]]) is not None:
- # The ignoreregex matched. Return.
- if ll <= 7: logSys.log(7, "Matched ignoreregex and was \"%s\" ignored",
- "".join(tupleLine[::2]))
- return failList
-
- timeText = tupleLine[1]
- if date:
- self.__lastTimeText = timeText
- self.__lastDate = date
- elif timeText:
-
- dateTimeMatch = self.dateDetector.getTime(timeText, tupleLine[3])
-
- if dateTimeMatch is None:
- logSys.error("findFailure failed to parse timeText: %s", timeText)
- date = self.__lastDate
-
- else:
- # Lets get the time part
- date = dateTimeMatch[0]
-
- self.__lastTimeText = timeText
- self.__lastDate = date
- else:
- timeText = self.__lastTimeText or "".join(tupleLine[::2])
- date = self.__lastDate
-
- if self.checkFindTime and date is not None and date < MyTime.time() - self.getFindTime():
- if ll <= 5: logSys.log(5, "Ignore line since time %s < %s - %s",
- date, MyTime.time(), self.getFindTime())
- return failList
+ defcidr = IPAddr.CIDR_UNSPEC
+ if self.__useDns == "raw" or self.returnRawHost:
+ defcidr = IPAddr.CIDR_RAW
if self.__lineBufferSize > 1:
- orgBuffer = self.__lineBuffer = (
- self.__lineBuffer + [tupleLine[:3]])[-self.__lineBufferSize:]
+ self.__lineBuffer.append(tupleLine)
+ orgBuffer = self.__lineBuffer = self.__lineBuffer[-self.__lineBufferSize:]
else:
- orgBuffer = self.__lineBuffer = [tupleLine[:3]]
- if ll <= 5: logSys.log(5, "Looking for match of %r", self.__lineBuffer)
- buf = Regex._tupleLinesBuf(self.__lineBuffer)
+ orgBuffer = self.__lineBuffer = [tupleLine]
+ if ll <= 5: logSys.log(5, "Looking for match of %r", orgBuffer)
+ buf = Regex._tupleLinesBuf(orgBuffer)
+
+ # Checks if we must ignore this line (only if fewer ignoreregex than failregex).
+ if self.__ignoreRegex and len(self.__ignoreRegex) < len(self.__failRegex) - 2:
+ if self._ignoreLine(buf, orgBuffer) is not None:
+ # The ignoreregex matched. Return.
+ return failList
# Pre-filter fail regex (if available):
preGroups = {}
if self.__prefRegex:
if ll <= 5: logSys.log(5, " Looking for prefregex %r", self.__prefRegex.getRegex())
- self.__prefRegex.search(buf, self.__lineBuffer)
+ self.__prefRegex.search(buf, orgBuffer)
if not self.__prefRegex.hasMatched():
if ll <= 5: logSys.log(5, " Prefregex not matched")
return failList
preGroups = self.__prefRegex.getGroups()
if ll <= 7: logSys.log(7, " Pre-filter matched %s", preGroups)
- repl = preGroups.get('content')
+ repl = preGroups.pop('content', None)
# Content replacement:
if repl:
- del preGroups['content']
self.__lineBuffer, buf = [('', '', repl)], None
# Iterates over all the regular expressions.
@@ -815,31 +888,25 @@ class Filter(JailThread):
# The failregex matched.
if ll <= 7: logSys.log(7, " Matched failregex %d: %s", failRegexIndex, fail)
# Checks if we must ignore this match.
- if self.ignoreLine(failRegex.getMatchedTupleLines()) \
- is not None:
+ if self.__ignoreRegex and self._ignoreLine(buf, orgBuffer, failRegex) is not None:
# The ignoreregex matched. Remove ignored match.
- self.__lineBuffer, buf = failRegex.getUnmatchedTupleLines(), None
- if ll <= 7: logSys.log(7, " Matched ignoreregex and was ignored")
+ buf = None
if not self.checkAllRegex:
break
- else:
- continue
- if date is None:
- logSys.warning(
- "Found a match for %r but no valid date/time "
- "found for %r. Please try setting a custom "
- "date pattern (see man page jail.conf(5)). "
- "If format is complex, please "
- "file a detailed issue on"
- " https://github.com/fail2ban/fail2ban/issues "
- "in order to get support for this format.",
- "\n".join(failRegex.getMatchedLines()), timeText)
continue
+ if noDate:
+ self._logWarnOnce("_next_noTimeWarn",
+ ("Found a match but no valid date/time found for %r.", tupleLine[1]),
+ ("Match without a timestamp: %s", "\n".join(failRegex.getMatchedLines())),
+ ("Please try setting a custom date pattern (see man page jail.conf(5)).",)
+ )
+ if date is None and self.checkFindTime: continue
# we should check all regex (bypass on multi-line, otherwise too complex):
- if not self.checkAllRegex or self.getMaxLines() > 1:
+ if not self.checkAllRegex or self.__lineBufferSize > 1:
self.__lineBuffer, buf = failRegex.getUnmatchedTupleLines(), None
# merge data if multi-line failure:
- raw = returnRawHost
+ cidr = defcidr
+ raw = (defcidr == IPAddr.CIDR_RAW)
if preGroups:
currFail, fail = fail, preGroups.copy()
fail.update(currFail)
@@ -858,44 +925,50 @@ class Filter(JailThread):
# failure-id:
fid = fail.get('fid')
# ip-address or host:
- host = fail.get('ip4')
- if host is not None:
- cidr = IPAddr.FAM_IPv4
+ ip = fail.get('ip4')
+ if ip is not None:
+ cidr = int(fail.get('cidr') or IPAddr.FAM_IPv4)
raw = True
else:
- host = fail.get('ip6')
- if host is not None:
- cidr = IPAddr.FAM_IPv6
+ ip = fail.get('ip6')
+ if ip is not None:
+ cidr = int(fail.get('cidr') or IPAddr.FAM_IPv6)
raw = True
- if host is None:
- host = fail.get('dns')
- if host is None:
- # first try to check we have mlfid case (cache connection id):
- if fid is None and mlfid is None:
- # if no failure-id also (obscure case, wrong regex), throw error inside getFailID:
- fid = failRegex.getFailID()
- host = fid
- cidr = IPAddr.CIDR_RAW
+ else:
+ ip = fail.get('dns')
+ if ip is None:
+ # first try to check we have mlfid case (cache connection id):
+ if fid is None and mlfid is None:
+ # if no failure-id also (obscure case, wrong regex), throw error inside getFailID:
+ fid = failRegex.getFailID()
+ ip = fid
+ raw = True
# if mlfid case (not failure):
- if host is None:
+ if ip is None:
if ll <= 7: logSys.log(7, "No failure-id by mlfid %r in regex %s: %s",
mlfid, failRegexIndex, fail.get('mlfforget', "waiting for identifier"))
- if not self.checkAllRegex: return failList
- ips = [None]
+ fail['mlfpending'] = 1; # mark failure is pending
+ if not self.checkAllRegex and self.ignorePending: return failList
+ fids = [None]
# if raw - add single ip or failure-id,
# otherwise expand host to multiple ips using dns (or ignore it if not valid):
elif raw:
- ip = IPAddr(host, cidr)
- # check host equal failure-id, if not - failure with complex id:
- if fid is not None and fid != host:
- ip = IPAddr(fid, IPAddr.CIDR_RAW)
- ips = [ip]
+ # check ip/host equal failure-id, if not - failure with complex id:
+ if fid is None or fid == ip:
+ fid = IPAddr(ip, cidr)
+ else:
+ fail['ip'] = IPAddr(ip, cidr)
+ fid = IPAddr(fid, defcidr)
+ fids = [fid]
# otherwise, try to use dns conversion:
else:
- ips = DNSUtils.textToIp(host, self.__useDns)
+ fids = DNSUtils.textToIp(ip, self.__useDns)
+ # if checkAllRegex we must make a copy (to be sure next RE doesn't change merged/cached failure):
+ if self.checkAllRegex and mlfid is not None:
+ fail = fail.copy()
# append failure with match to the list:
- for ip in ips:
- failList.append([failRegexIndex, ip, date, fail])
+ for fid in fids:
+ failList.append([failRegexIndex, fid, date, fail])
if not self.checkAllRegex:
break
except RegexException as e: # pragma: no cover - unsure if reachable
@@ -938,7 +1011,7 @@ class FileFilter(Filter):
log.setPos(lastpos)
self.__logs[path] = log
logSys.info("Added logfile: %r (pos = %s, hash = %s)" , path, log.getPos(), log.getHash())
- if autoSeek:
+ if autoSeek and not tail:
self.__autoSeek[path] = autoSeek
self._addLogPath(path) # backend specific
@@ -957,9 +1030,6 @@ class FileFilter(Filter):
log = self.__logs.pop(path)
except KeyError:
return
- db = self.jail.database
- if db is not None:
- db.updateLog(self.jail, log)
logSys.info("Removed logfile: %r", path)
self._delLogPath(path)
return
@@ -1022,7 +1092,8 @@ class FileFilter(Filter):
# MyTime.time()-self.findTime. When a failure is detected, a FailTicket
# is created and is added to the FailManager.
- def getFailures(self, filename):
+ def getFailures(self, filename, inOperation=None):
+ if self.idle: return False
log = self.getLog(filename)
if log is None:
logSys.error("Unable to get failures in %s", filename)
@@ -1067,15 +1138,26 @@ class FileFilter(Filter):
if has_content:
while not self.idle:
line = log.readline()
- if not line or not self.active:
- # The jail reached the bottom or has been stopped
+ if not self.active: break; # jail has been stopped
+ if line is None:
+ # The jail reached the bottom, simply set in operation for this log
+ # (since we are first time at end of file, growing is only possible after modifications):
+ log.inOperation = True
break
+ # acquire in operation from log and process:
+ self.inOperation = inOperation if inOperation is not None else log.inOperation
self.processLineAndAdd(line)
finally:
log.close()
- db = self.jail.database
- if db is not None:
- db.updateLog(self.jail, log)
+ if self.jail.database is not None:
+ self._pendDBUpdates[log] = 1
+ if (
+ self.ticks % 100 == 0
+ or MyTime.time() >= self._nextUpdateTM
+ or not self.active
+ ):
+ self._updateDBPending()
+ self._nextUpdateTM = MyTime.time() + Utils.DEFAULT_SLEEP_TIME * 5
return True
##
@@ -1086,7 +1168,9 @@ class FileFilter(Filter):
fs = container.getFileSize()
if logSys.getEffectiveLevel() <= logging.DEBUG:
logSys.debug("Seek to find time %s (%s), file size %s", date,
- datetime.datetime.fromtimestamp(date).strftime("%Y-%m-%d %H:%M:%S"), fs)
+ MyTime.time2str(date), fs)
+ if not fs:
+ return
minp = container.getPos()
maxp = fs
tryPos = minp
@@ -1110,8 +1194,8 @@ class FileFilter(Filter):
dateTimeMatch = None
nextp = None
while True:
- line = container.readline()
- if not line:
+ line = container.readline(False)
+ if line is None:
break
(timeMatch, template) = self.dateDetector.matchTime(line)
if timeMatch:
@@ -1165,7 +1249,7 @@ class FileFilter(Filter):
container.setPos(foundPos)
if logSys.getEffectiveLevel() <= logging.DEBUG:
logSys.debug("Position %s from %s, found time %s (%s) within %s seeks", lastPos, fs, foundTime,
- (datetime.datetime.fromtimestamp(foundTime).strftime("%Y-%m-%d %H:%M:%S") if foundTime is not None else ''), cntr)
+ (MyTime.time2str(foundTime) if foundTime is not None else ''), cntr)
def status(self, flavor="basic"):
"""Status of Filter plus files being monitored.
@@ -1175,12 +1259,33 @@ class FileFilter(Filter):
ret.append(("File list", path))
return ret
- def stop(self):
- """Stop monitoring of log-file(s)
+ def _updateDBPending(self):
+ """Apply pending updates (log position) to database.
+ """
+ db = self.jail.database
+ while True:
+ try:
+ log, args = self._pendDBUpdates.popitem()
+ except KeyError:
+ break
+ db.updateLog(self.jail, log)
+
+ def onStop(self):
+ """Stop monitoring of log-file(s). Invoked after run method.
"""
+ # ensure positions of pending logs are up-to-date:
+ if self._pendDBUpdates and self.jail.database:
+ self._updateDBPending()
# stop files monitoring:
for path in self.__logs.keys():
self.delLogPath(path)
+
+ def stop(self):
+ """Stop filter
+ """
+ # normally onStop will be called automatically in thread after its run ends,
+ # but for backwards compatibilities we'll invoke it in caller of stop method.
+ self.onStop()
# stop thread:
super(Filter, self).stop()
@@ -1208,32 +1313,56 @@ except ImportError: # pragma: no cover
class FileContainer:
- def __init__(self, filename, encoding, tail = False):
+ def __init__(self, filename, encoding, tail=False, doOpen=False):
self.__filename = filename
+ self.waitForLineEnd = True
self.setEncoding(encoding)
self.__tail = tail
self.__handler = None
+ self.__pos = 0
+ self.__pos4hash = 0
+ self.__hash = ''
+ self.__hashNextTime = time.time() + 30
# Try to open the file. Raises an exception if an error occurred.
handler = open(filename, 'rb')
- stats = os.fstat(handler.fileno())
- self.__ino = stats.st_ino
+ if doOpen: # fail2ban-regex only (don't need to reopen it and check for rotation)
+ self.__handler = handler
+ return
try:
- firstLine = handler.readline()
- # Computes the MD5 of the first line.
- self.__hash = md5sum(firstLine).hexdigest()
- # Start at the beginning of file if tail mode is off.
- if tail:
- handler.seek(0, 2)
- self.__pos = handler.tell()
- else:
- self.__pos = 0
+ stats = os.fstat(handler.fileno())
+ self.__ino = stats.st_ino
+ if stats.st_size:
+ firstLine = handler.readline()
+ # first line available and contains new-line:
+ if firstLine != firstLine.rstrip(b'\r\n'):
+ # Computes the MD5 of the first line.
+ self.__hash = md5sum(firstLine).hexdigest()
+ # if tail mode scroll to the end of file
+ if tail:
+ handler.seek(0, 2)
+ self.__pos = handler.tell()
finally:
handler.close()
+ ## shows that log is in operation mode (expecting new messages only from here):
+ self.inOperation = tail
+
+ def __hash__(self):
+ return hash(self.__filename)
+ def __eq__(self, other):
+ return (id(self) == id(other) or
+ self.__filename == (other.__filename if isinstance(other, FileContainer) else other)
+ )
+ def __repr__(self):
+ return 'file-log:'+self.__filename
def getFileName(self):
return self.__filename
def getFileSize(self):
+ h = self.__handler
+ if h is not None:
+ stats = os.fstat(h.fileno())
+ return stats.st_size
return os.path.getsize(self.__filename);
def setEncoding(self, encoding):
@@ -1252,38 +1381,54 @@ class FileContainer:
def setPos(self, value):
self.__pos = value
- def open(self):
- self.__handler = open(self.__filename, 'rb')
- # Set the file descriptor to be FD_CLOEXEC
- fd = self.__handler.fileno()
- flags = fcntl.fcntl(fd, fcntl.F_GETFD)
- fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
- # Stat the file before even attempting to read it
- stats = os.fstat(self.__handler.fileno())
- if not stats.st_size:
- # yoh: so it is still an empty file -- nothing should be
- # read from it yet
- # print "D: no content -- return"
- return False
- firstLine = self.__handler.readline()
- # Computes the MD5 of the first line.
- myHash = md5sum(firstLine).hexdigest()
- ## print "D: fn=%s hashes=%s/%s inos=%s/%s pos=%s rotate=%s" % (
- ## self.__filename, self.__hash, myHash, stats.st_ino, self.__ino, self.__pos,
- ## self.__hash != myHash or self.__ino != stats.st_ino)
- ## sys.stdout.flush()
- # Compare hash and inode
- if self.__hash != myHash or self.__ino != stats.st_ino:
- logSys.log(logging.MSG, "Log rotation detected for %s", self.__filename)
- self.__hash = myHash
- self.__ino = stats.st_ino
- self.__pos = 0
- # Sets the file pointer to the last position.
- self.__handler.seek(self.__pos)
+ def open(self, forcePos=None):
+ h = open(self.__filename, 'rb')
+ try:
+ # Set the file descriptor to be FD_CLOEXEC
+ fd = h.fileno()
+ flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+ myHash = self.__hash
+ # Stat the file before even attempting to read it
+ stats = os.fstat(h.fileno())
+ rotflg = stats.st_size < self.__pos or stats.st_ino != self.__ino
+ if rotflg or not len(myHash) or time.time() > self.__hashNextTime:
+ myHash = ''
+ firstLine = h.readline()
+ # Computes the MD5 of the first line (if it is complete)
+ if firstLine != firstLine.rstrip(b'\r\n'):
+ myHash = md5sum(firstLine).hexdigest()
+ self.__hashNextTime = time.time() + 30
+ elif stats.st_size == self.__pos:
+ myHash = self.__hash
+ # Compare size, hash and inode
+ if rotflg or myHash != self.__hash:
+ if self.__hash != '':
+ logSys.log(logging.MSG, "Log rotation detected for %s, reason: %r", self.__filename,
+ (stats.st_size, self.__pos, stats.st_ino, self.__ino, myHash, self.__hash))
+ self.__ino = stats.st_ino
+ self.__pos = 0
+ self.__hash = myHash
+ # if nothing to read from file yet (empty or no new data):
+ if forcePos is not None:
+ self.__pos = forcePos
+ elif stats.st_size <= self.__pos:
+ return False
+ # Sets the file pointer to the last position.
+ h.seek(self.__pos)
+ # leave file open (to read content):
+ self.__handler = h; h = None
+ finally:
+ # close (no content or error only)
+ if h:
+ h.close(); h = None
return True
def seek(self, offs, endLine=True):
h = self.__handler
+ if h is None:
+ self.open(offs)
+ h = self.__handler
# seek to given position
h.seek(offs, 0)
# goto end of next line
@@ -1301,38 +1446,98 @@ class FileContainer:
try:
return line.decode(enc, 'strict')
except (UnicodeDecodeError, UnicodeEncodeError) as e:
+ # avoid warning if got incomplete end of line (e. g. '\n' in "...[0A" followed by "00]..." for utf-16le:
+ if (e.end == len(line) and line[e.start] in b'\r\n'):
+ return line[0:e.start].decode(enc, 'replace')
global _decode_line_warn
- lev = logging.DEBUG
- if _decode_line_warn.get(filename, 0) <= MyTime.time():
+ lev = 7
+ if not _decode_line_warn.get(filename, 0):
lev = logging.WARNING
- _decode_line_warn[filename] = MyTime.time() + 24*60*60
+ _decode_line_warn.set(filename, 1)
logSys.log(lev,
- "Error decoding line from '%s' with '%s'."
- " Consider setting logencoding=utf-8 (or another appropriate"
- " encoding) for this jail. Continuing"
- " to process line ignoring invalid characters: %r",
- filename, enc, line)
+ "Error decoding line from '%s' with '%s'.", filename, enc)
+ if logSys.getEffectiveLevel() <= lev:
+ logSys.log(lev,
+ "Consider setting logencoding to appropriate encoding for this jail. "
+ "Continuing to process line ignoring invalid characters: %r",
+ line)
# decode with replacing error chars:
line = line.decode(enc, 'replace')
return line
- def readline(self):
+ def readline(self, complete=True):
+ """Read line from file
+
+ In opposite to pythons readline it doesn't return new-line,
+ so returns either the line if line is complete (and complete=True) or None
+ if line is not complete (and complete=True) or there is no content to read.
+ If line is complete (and complete is True), it also shift current known
+ position to begin of next line.
+
+ Also it is safe against interim new-line bytes (e. g. part of multi-byte char)
+ in given encoding.
+ """
if self.__handler is None:
return ""
- return FileContainer.decode_line(
- self.getFileName(), self.getEncoding(), self.__handler.readline())
+ # read raw bytes up to \n char:
+ b = self.__handler.readline()
+ if not b:
+ return None
+ bl = len(b)
+ # convert to log-encoding (new-line char could disappear if it is part of multi-byte sequence):
+ r = FileContainer.decode_line(
+ self.getFileName(), self.getEncoding(), b)
+ # trim new-line at end and check the line was written complete (contains a new-line):
+ l = r.rstrip('\r\n')
+ if complete:
+ if l == r:
+ # try to fill buffer in order to find line-end in log encoding:
+ fnd = 0
+ while 1:
+ r = self.__handler.readline()
+ if not r:
+ break
+ b += r
+ bl += len(r)
+ # convert to log-encoding:
+ r = FileContainer.decode_line(
+ self.getFileName(), self.getEncoding(), b)
+ # ensure new-line is not in the middle (buffered 2 strings, e. g. in utf-16le it is "...[0A"+"00]..."):
+ e = r.find('\n')
+ if e >= 0 and e != len(r)-1:
+ l, r = r[0:e], r[0:e+1]
+ # back to bytes and get offset to seek after NL:
+ r = r.encode(self.getEncoding(), 'replace')
+ self.__handler.seek(-bl+len(r), 1)
+ return l
+ # trim new-line at end and check the line was written complete (contains a new-line):
+ l = r.rstrip('\r\n')
+ if l != r:
+ return l
+ if self.waitForLineEnd:
+ # not fulfilled - seek back and return:
+ self.__handler.seek(-bl, 1)
+ return None
+ return l
def close(self):
- if not self.__handler is None:
- # Saves the last position.
+ if self.__handler is not None:
+ # Saves the last real position.
self.__pos = self.__handler.tell()
# Closes the file.
self.__handler.close()
self.__handler = None
- ## print "D: Closed %s with pos %d" % (handler, self.__pos)
- ## sys.stdout.flush()
-_decode_line_warn = {}
+ def __iter__(self):
+ return self
+ def next(self):
+ line = self.readline()
+ if line is None:
+ self.close()
+ raise StopIteration
+ return line
+
+_decode_line_warn = Utils.Cache(maxCount=1000, maxTime=24*60*60);
##
diff --git a/fail2ban/server/filtergamin.py b/fail2ban/server/filtergamin.py
index 3baf8c54..c5373445 100644
--- a/fail2ban/server/filtergamin.py
+++ b/fail2ban/server/filtergamin.py
@@ -55,7 +55,6 @@ class FilterGamin(FileFilter):
def __init__(self, jail):
FileFilter.__init__(self, jail)
- self.__modified = False
# Gamin monitor
self.monitor = gamin.WatchMonitor()
fd = self.monitor.get_fd()
@@ -64,28 +63,12 @@ class FilterGamin(FileFilter):
logSys.debug("Created FilterGamin")
def callback(self, path, event):
- logSys.debug("Got event: " + repr(event) + " for " + path)
+ logSys.log(4, "Got event: " + repr(event) + " for " + path)
if event in (gamin.GAMCreated, gamin.GAMChanged, gamin.GAMExists):
logSys.debug("File changed: " + path)
- self.__modified = True
self.ticks += 1
- self._process_file(path)
-
- def _process_file(self, path):
- """Process a given file
-
- TODO -- RF:
- this is a common logic and must be shared/provided by FileFilter
- """
self.getFailures(path)
- try:
- while True:
- ticket = self.failManager.toBan()
- self.jail.putFailTicket(ticket)
- except FailManagerEmpty:
- self.failManager.cleanup(MyTime.time())
- self.__modified = False
##
# Add a log file path
@@ -132,6 +115,9 @@ class FilterGamin(FileFilter):
Utils.wait_for(lambda: not self.active or self._handleEvents(),
self.sleeptime)
self.ticks += 1
+ if self.ticks % 10 == 0:
+ self.performSvc()
+
logSys.debug("[%s] filter terminated", self.jailName)
return True
diff --git a/fail2ban/server/filterpoll.py b/fail2ban/server/filterpoll.py
index 5905c5b5..196955e5 100644
--- a/fail2ban/server/filterpoll.py
+++ b/fail2ban/server/filterpoll.py
@@ -27,9 +27,7 @@ __license__ = "GPL"
import os
import time
-from .failmanager import FailManagerEmpty
from .filter import FileFilter
-from .mytime import MyTime
from .utils import Utils
from ..helpers import getLogger, logging
@@ -55,7 +53,6 @@ class FilterPoll(FileFilter):
def __init__(self, jail):
FileFilter.__init__(self, jail)
- self.__modified = False
## The time of the last modification of the file.
self.__prevStats = dict()
self.__file404Cnt = dict()
@@ -98,8 +95,8 @@ class FilterPoll(FileFilter):
def run(self):
while self.active:
try:
- if logSys.getEffectiveLevel() <= 6:
- logSys.log(6, "Woke up idle=%s with %d files monitored",
+ if logSys.getEffectiveLevel() <= 4:
+ logSys.log(4, "Woke up idle=%s with %d files monitored",
self.idle, self.getLogCount())
if self.idle:
if not Utils.wait_for(lambda: not self.active or not self.idle,
@@ -111,26 +108,21 @@ class FilterPoll(FileFilter):
modlst = []
Utils.wait_for(lambda: not self.active or self.getModified(modlst),
self.sleeptime)
+ if not self.active: # pragma: no cover - timing
+ break
for filename in modlst:
self.getFailures(filename)
- self.__modified = True
self.ticks += 1
- if self.__modified:
- try:
- while True:
- ticket = self.failManager.toBan()
- self.jail.putFailTicket(ticket)
- except FailManagerEmpty:
- self.failManager.cleanup(MyTime.time())
- self.__modified = False
+ if self.ticks % 10 == 0:
+ self.performSvc()
except Exception as e: # pragma: no cover
if not self.active: # if not active - error by stop...
break
logSys.error("Caught unhandled exception in main cycle: %r", e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
# incr common error counter:
- self.commonError()
+ self.commonError("unhandled", e)
logSys.debug("[%s] filter terminated", self.jailName)
return True
@@ -144,11 +136,11 @@ class FilterPoll(FileFilter):
try:
logStats = os.stat(filename)
stats = logStats.st_mtime, logStats.st_ino, logStats.st_size
- pstats = self.__prevStats.get(filename, (0))
- if logSys.getEffectiveLevel() <= 5:
+ pstats = self.__prevStats.get(filename, (0,))
+ if logSys.getEffectiveLevel() <= 4:
# we do not want to waste time on strftime etc if not necessary
dt = logStats.st_mtime - pstats[0]
- logSys.log(5, "Checking %s for being modified. Previous/current stats: %s / %s. dt: %s",
+ logSys.log(4, "Checking %s for being modified. Previous/current stats: %s / %s. dt: %s",
filename, pstats, stats, dt)
# os.system("stat %s | grep Modify" % filename)
self.__file404Cnt[filename] = 0
diff --git a/fail2ban/server/filterpyinotify.py b/fail2ban/server/filterpyinotify.py
index 4f3262b6..16b6cfd5 100644
--- a/fail2ban/server/filterpyinotify.py
+++ b/fail2ban/server/filterpyinotify.py
@@ -75,7 +75,6 @@ class FilterPyinotify(FileFilter):
def __init__(self, jail):
FileFilter.__init__(self, jail)
- self.__modified = False
# Pyinotify watch manager
self.__monitor = pyinotify.WatchManager()
self.__notifier = None
@@ -87,7 +86,7 @@ class FilterPyinotify(FileFilter):
logSys.debug("Created FilterPyinotify")
def callback(self, event, origin=''):
- logSys.log(7, "[%s] %sCallback for Event: %s", self.jailName, origin, event)
+ logSys.log(4, "[%s] %sCallback for Event: %s", self.jailName, origin, event)
path = event.pathname
# check watching of this path:
isWF = False
@@ -140,13 +139,6 @@ class FilterPyinotify(FileFilter):
"""
if not self.idle:
self.getFailures(path)
- try:
- while True:
- ticket = self.failManager.toBan()
- self.jail.putFailTicket(ticket)
- except FailManagerEmpty:
- self.failManager.cleanup(MyTime.time())
- self.__modified = False
def _addPending(self, path, reason, isDir=False):
if path not in self.__pending:
@@ -173,7 +165,7 @@ class FilterPyinotify(FileFilter):
return
found = {}
minTime = 60
- for path, (retardTM, isDir) in self.__pending.iteritems():
+ for path, (retardTM, isDir) in list(self.__pending.items()):
if ntm - self.__pendingChkTime < retardTM:
if minTime > retardTM: minTime = retardTM
continue
@@ -192,10 +184,11 @@ class FilterPyinotify(FileFilter):
for path, isDir in found.iteritems():
self._delPending(path)
# refresh monitoring of this:
- self._refreshWatcher(path, isDir=isDir)
+ if isDir is not None:
+ self._refreshWatcher(path, isDir=isDir)
if isDir:
# check all files belong to this dir:
- for logpath in self.__watchFiles:
+ for logpath in list(self.__watchFiles):
if logpath.startswith(path + pathsep):
# if still no file - add to pending, otherwise refresh and process:
if not os.path.isfile(logpath):
@@ -275,28 +268,32 @@ class FilterPyinotify(FileFilter):
def _addLogPath(self, path):
self._addFileWatcher(path)
- self._process_file(path)
+ # notify (wake up if in waiting):
+ if self.active:
+ self.__pendingMinTime = 0
+ # retard until filter gets started, isDir=None signals special case: process file only (don't need to refresh monitor):
+ self._addPending(path, ('INITIAL', path), isDir=None)
- ##
+ ##
# Delete a log path
#
# @param path the log file to delete
def _delLogPath(self, path):
+ self._delPending(path)
if not self._delFileWatcher(path): # pragma: no cover
logSys.error("Failed to remove watch on path: %s", path)
- self._delPending(path)
path_dir = dirname(path)
- for k in self.__watchFiles:
+ for k in list(self.__watchFiles):
if k.startswith(path_dir + pathsep):
path_dir = None
break
if path_dir:
# Remove watches for the directory
# since there is no other monitored file under this directory
- self._delDirWatcher(path_dir)
self._delPending(path_dir)
+ self._delDirWatcher(path_dir)
# pyinotify.ProcessEvent default handler:
def __process_default(self, event):
@@ -342,16 +339,26 @@ class FilterPyinotify(FileFilter):
self.__notifier.process_events()
# wait for events / timeout:
- notify_maxtout = self.__notify_maxtout
def __check_events():
- return not self.active or self.__notifier.check_events(timeout=notify_maxtout)
- if Utils.wait_for(__check_events, min(self.sleeptime, self.__pendingMinTime)):
+ return (
+ not self.active
+ or bool(self.__notifier.check_events(timeout=self.__notify_maxtout))
+ or (self.__pendingMinTime and self.__pending)
+ )
+ wres = Utils.wait_for(__check_events, min(self.sleeptime, self.__pendingMinTime))
+ if wres:
if not self.active: break
- self.__notifier.read_events()
+ if not isinstance(wres, dict):
+ self.__notifier.read_events()
+
+ self.ticks += 1
# check pending files/dirs (logrotate ready):
- if not self.idle:
- self._checkPending()
+ if self.idle:
+ continue
+ self._checkPending()
+ if self.ticks % 10 == 0:
+ self.performSvc()
except Exception as e: # pragma: no cover
if not self.active: # if not active - error by stop...
@@ -359,10 +366,8 @@ class FilterPyinotify(FileFilter):
logSys.error("Caught unhandled exception in main cycle: %r", e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
# incr common error counter:
- self.commonError()
+ self.commonError("unhandled", e)
- self.ticks += 1
-
logSys.debug("[%s] filter exited (pyinotifier)", self.jailName)
self.__notifier = None
diff --git a/fail2ban/server/filtersystemd.py b/fail2ban/server/filtersystemd.py
index f87fdb4e..a83b7a13 100644
--- a/fail2ban/server/filtersystemd.py
+++ b/fail2ban/server/filtersystemd.py
@@ -22,7 +22,7 @@ __author__ = "Steven Hiscocks"
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
__license__ = "GPL"
-import datetime
+import os
import time
from distutils.version import LooseVersion
@@ -86,9 +86,18 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
files.extend(glob.glob(p))
args['files'] = list(set(files))
+ # Default flags is SYSTEM_ONLY(4). This would lead to ignore user session files,
+ # so can prevent "Too many open files" errors on a lot of user sessions (see gh-2392):
try:
args['flags'] = int(kwargs.pop('journalflags'))
except KeyError:
+ # be sure all journal types will be opened if files/path specified (don't set flags):
+ if ('files' not in args or not len(args['files'])) and ('path' not in args or not args['path']):
+ args['flags'] = int(os.getenv("F2B_SYSTEMD_DEFAULT_FLAGS", 4))
+
+ try:
+ args['namespace'] = kwargs.pop('namespace')
+ except KeyError:
pass
return args
@@ -186,6 +195,13 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
def getJournalReader(self):
return self.__journal
+ def getJrnEntTime(self, logentry):
+ """ Returns time of entry as tuple (ISO-str, Posix)."""
+ date = logentry.get('_SOURCE_REALTIME_TIMESTAMP')
+ if date is None:
+ date = logentry.get('__REALTIME_TIMESTAMP')
+ return (date.isoformat(), time.mktime(date.timetuple()) + date.microsecond/1.0E6)
+
##
# Format journal log entry into syslog style
#
@@ -208,12 +224,18 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
if not v:
v = logentry.get('_PID')
if v:
- logelements[-1] += ("[%i]" % v)
+ try: # [integer] (if already numeric):
+ v = "[%i]" % v
+ except TypeError:
+ try: # as [integer] (try to convert to int):
+ v = "[%i]" % int(v, 0)
+ except (TypeError, ValueError): # fallback - [string] as it is
+ v = "[%s]" % v
+ logelements[-1] += v
logelements[-1] += ":"
if logelements[-1] == "kernel:":
- if '_SOURCE_MONOTONIC_TIMESTAMP' in logentry:
- monotonic = logentry.get('_SOURCE_MONOTONIC_TIMESTAMP')
- else:
+ monotonic = logentry.get('_SOURCE_MONOTONIC_TIMESTAMP')
+ if monotonic is None:
monotonic = logentry.get('__MONOTONIC_TIMESTAMP')[0]
logelements.append("[%12.6f]" % monotonic.total_seconds())
msg = logentry.get('MESSAGE','')
@@ -224,19 +246,21 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
logline = " ".join(logelements)
- date = logentry.get('_SOURCE_REALTIME_TIMESTAMP',
- logentry.get('__REALTIME_TIMESTAMP'))
+ date = self.getJrnEntTime(logentry)
logSys.log(5, "[%s] Read systemd journal entry: %s %s", self.jailName,
- date.isoformat(), logline)
+ date[0], logline)
## use the same type for 1st argument:
- return ((logline[:0], date.isoformat(), logline),
- time.mktime(date.timetuple()) + date.microsecond/1.0E6)
+ return ((logline[:0], date[0] + ' ', logline.replace('\n', '\\n')), date[1])
def seekToTime(self, date):
- if not isinstance(date, datetime.datetime):
- date = datetime.datetime.fromtimestamp(date)
+ if isinstance(date, (int, long)):
+ date = float(date)
self.__journal.seek_realtime(date)
+ def inOperationMode(self):
+ self.inOperation = True
+ logSys.info("[%s] Jail is in operation now (process new journal entries)", self.jailName)
+
##
# Main loop.
#
@@ -247,14 +271,40 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
if not self.getJournalMatch():
logSys.notice(
- "Jail started without 'journalmatch' set. "
+ "[%s] Jail started without 'journalmatch' set. "
"Jail regexs will be checked against all journal entries, "
- "which is not advised for performance reasons.")
+ "which is not advised for performance reasons.", self.jailName)
+
+ # Save current cursor position (to recognize in operation mode):
+ logentry = None
+ try:
+ self.__journal.seek_tail()
+ logentry = self.__journal.get_previous()
+ if logentry:
+ self.__journal.get_next()
+ except OSError:
+ logentry = None # Reading failure, so safe to ignore
+ if logentry:
+ # Try to obtain the last known time (position of journal)
+ startTime = 0
+ if self.jail.database is not None:
+ startTime = self.jail.database.getJournalPos(self.jail, 'systemd-journal') or 0
+ # Seek to max(last_known_time, now - findtime) in journal
+ startTime = max( startTime, MyTime.time() - int(self.getFindTime()) )
+ self.seekToTime(startTime)
+ # Not in operation while we'll read old messages ...
+ self.inOperation = False
+ # Save current time in order to check time to switch "in operation" mode
+ startTime = (1, MyTime.time(), logentry.get('__CURSOR'))
+ else:
+ # empty journal or no entries for current filter:
+ self.inOperationMode()
+				# seek_tail() seems to have a bug when there are no entries (could bypass some entries hereafter), so seek to now instead:
+ startTime = MyTime.time()
+ self.seekToTime(startTime)
+ # for possible future switches of in-operation mode:
+ startTime = (0, startTime)
- # Seek to now - findtime in journal
- start_time = datetime.datetime.now() - \
- datetime.timedelta(seconds=int(self.getFindTime()))
- self.seekToTime(start_time)
# Move back one entry to ensure do not end up in dead space
# if start time beyond end of journal
try:
@@ -262,18 +312,37 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
except OSError:
pass # Reading failure, so safe to ignore
+ wcode = journal.NOP
+ line = None
while self.active:
# wait for records (or for timeout in sleeptime seconds):
try:
- ## todo: find better method as wait_for to break (e.g. notify) journal.wait(self.sleeptime),
- ## don't use `journal.close()` for it, because in some python/systemd implementation it may
- ## cause abnormal program termination
- #self.__journal.wait(self.sleeptime) != journal.NOP
- ##
- ## wait for entries without sleep in intervals, because "sleeping" in journal.wait:
- Utils.wait_for(lambda: not self.active or \
- self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL) != journal.NOP,
- self.sleeptime, 0.00001)
+ ## wait for entries using journal.wait:
+ if wcode == journal.NOP and self.inOperation:
+ ## todo: find better method as wait_for to break (e.g. notify) journal.wait(self.sleeptime),
+ ## don't use `journal.close()` for it, because in some python/systemd implementation it may
+ ## cause abnormal program termination (e. g. segfault)
+ ##
+ ## wait for entries without sleep in intervals, because "sleeping" in journal.wait,
+ ## journal.NOP is 0, so we can wait for non zero (APPEND or INVALIDATE):
+ wcode = Utils.wait_for(lambda: not self.active and journal.APPEND or \
+ self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL),
+ self.sleeptime, 0.00001)
+ ## if invalidate (due to rotation, vacuuming or journal files added/removed etc):
+ if self.active and wcode == journal.INVALIDATE:
+ if self.ticks:
+ logSys.log(logging.DEBUG, "[%s] Invalidate signaled, take a little break (rotation ends)", self.jailName)
+ time.sleep(self.sleeptime * 0.25)
+ Utils.wait_for(lambda: not self.active or \
+ self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL) != journal.INVALIDATE,
+ self.sleeptime * 3, 0.00001)
+ if self.ticks:
+ # move back and forth to ensure do not end up in dead space by rotation or vacuuming,
+ # if position beyond end of journal (gh-3396)
+ try:
+ if self.__journal.get_previous(): self.__journal.get_next()
+ except OSError:
+ pass
if self.idle:
# because journal.wait will returns immediatelly if we have records in journal,
# just wait a little bit here for not idle, to prevent hi-load:
@@ -292,43 +361,95 @@ class FilterSystemd(JournalFilter): # pragma: systemd no cover
e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
self.ticks += 1
if logentry:
- self.processLineAndAdd(
- *self.formatJournalEntry(logentry))
+ line, tm = self.formatJournalEntry(logentry)
+ # switch "in operation" mode if we'll find start entry (+ some delta):
+ if not self.inOperation:
+ if tm >= MyTime.time() - 1: # reached now (approximated):
+ self.inOperationMode()
+ elif startTime[0] == 1:
+ # if it reached start entry (or get read time larger than start time)
+ if logentry.get('__CURSOR') == startTime[2] or tm > startTime[1]:
+ # give the filter same time it needed to reach the start entry:
+ startTime = (0, MyTime.time()*2 - startTime[1])
+ elif tm > startTime[1]: # reached start time (approximated):
+ self.inOperationMode()
+ # process line
+ self.processLineAndAdd(line, tm)
self.__modified += 1
if self.__modified >= 100: # todo: should be configurable
+ wcode = journal.APPEND; # don't need wait - there are still unprocessed entries
break
else:
+ # "in operation" mode since we don't have messages anymore (reached end of journal):
+ if not self.inOperation:
+ self.inOperationMode()
+ wcode = journal.NOP; # enter wait - no more entries to process
break
- if self.__modified:
- try:
- while True:
- ticket = self.failManager.toBan()
- self.jail.putFailTicket(ticket)
- except FailManagerEmpty:
- self.failManager.cleanup(MyTime.time())
+ self.__modified = 0
+ if self.ticks % 10 == 0:
+ self.performSvc()
+ # update position in log (time and iso string):
+ if self.jail.database:
+ if line:
+ self._pendDBUpdates['systemd-journal'] = (tm, line[1])
+ line = None
+ if self._pendDBUpdates and (
+ self.ticks % 100 == 0
+ or MyTime.time() >= self._nextUpdateTM
+ or not self.active
+ ):
+ self._updateDBPending()
+ self._nextUpdateTM = MyTime.time() + Utils.DEFAULT_SLEEP_TIME * 5
except Exception as e: # pragma: no cover
if not self.active: # if not active - error by stop...
break
+ wcode = journal.NOP
logSys.error("Caught unhandled exception in main cycle: %r", e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
# incr common error counter:
- self.commonError()
+ self.commonError("unhandled", e)
logSys.debug("[%s] filter terminated", self.jailName)
# close journal:
+ self.closeJournal()
+
+ logSys.debug("[%s] filter exited (systemd)", self.jailName)
+ return True
+
+ def closeJournal(self):
try:
- if self.__journal:
- self.__journal.close()
+ jnl, self.__journal = self.__journal, None
+ if jnl:
+ jnl.close()
except Exception as e: # pragma: no cover
logSys.error("Close journal failed: %r", e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
- logSys.debug("[%s] filter exited (systemd)", self.jailName)
- return True
def status(self, flavor="basic"):
ret = super(FilterSystemd, self).status(flavor=flavor)
ret.append(("Journal matches",
[" + ".join(" ".join(match) for match in self.__matches)]))
return ret
+
+ def _updateDBPending(self):
+		"""Apply pending updates (journal position) to database.
+ """
+ db = self.jail.database
+ while True:
+ try:
+ log, args = self._pendDBUpdates.popitem()
+ except KeyError:
+ break
+ db.updateJournal(self.jail, log, *args)
+
+ def onStop(self):
+ """Stop monitoring of journal. Invoked after run method.
+ """
+ # close journal:
+ self.closeJournal()
+ # ensure positions of pending logs are up-to-date:
+ if self._pendDBUpdates and self.jail.database:
+ self._updateDBPending()
+
diff --git a/fail2ban/server/ipdns.py b/fail2ban/server/ipdns.py
index 2841eac1..b435c6df 100644
--- a/fail2ban/server/ipdns.py
+++ b/fail2ban/server/ipdns.py
@@ -42,6 +42,32 @@ def asip(ip):
return ip
return IPAddr(ip)
+def getfqdn(name=''):
+	"""Get fully-qualified hostname of given host, whereby resolution to an external
+	IP and name is preferred over the local domain (or a loopback), see gh-2438
+ """
+ try:
+ name = name or socket.gethostname()
+ names = (
+ ai[3] for ai in socket.getaddrinfo(
+ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
+ ) if ai[3]
+ )
+ if names:
+ # first try to find a fqdn starting with the host name like www.domain.tld for www:
+ pref = name+'.'
+ first = None
+ for ai in names:
+ if ai.startswith(pref):
+ return ai
+ if not first: first = ai
+ # not found - simply use first known fqdn:
+ return first
+ except socket.error:
+ pass
+ # fallback to python's own getfqdn routine:
+ return socket.getfqdn(name)
+
##
# Utils class for DNS handling.
@@ -64,18 +90,18 @@ class DNSUtils:
if ips is not None:
return ips
# retrieve ips
- ips = list()
+ ips = set()
saveerr = None
- for fam, ipfam in ((socket.AF_INET, IPAddr.FAM_IPv4), (socket.AF_INET6, IPAddr.FAM_IPv6)):
+ for fam in ((socket.AF_INET,socket.AF_INET6) if DNSUtils.IPv6IsAllowed() else (socket.AF_INET,)):
try:
for result in socket.getaddrinfo(dns, None, fam, 0, socket.IPPROTO_TCP):
# if getaddrinfo returns something unexpected:
if len(result) < 4 or not len(result[4]): continue
# get ip from `(2, 1, 6, '', ('127.0.0.1', 0))`,be sure we've an ip-string
# (some python-versions resp. host configurations causes returning of integer there):
- ip = IPAddr(str(result[4][0]), ipfam)
+ ip = IPAddr(str(result[4][0]), IPAddr._AF2FAM(fam))
if ip.isValid:
- ips.append(ip)
+ ips.add(ip)
except Exception as e:
saveerr = e
if not ips and saveerr:
@@ -103,19 +129,19 @@ class DNSUtils:
def textToIp(text, useDns):
""" Return the IP of DNS found in a given text.
"""
- ipList = list()
+ ipList = set()
# Search for plain IP
plainIP = IPAddr.searchIP(text)
if plainIP is not None:
ip = IPAddr(plainIP)
if ip.isValid:
- ipList.append(ip)
+ ipList.add(ip)
# If we are allowed to resolve -- give it a try if nothing was found
if useDns in ("yes", "warn") and not ipList:
# Try to get IP from possible DNS
ip = DNSUtils.dnsToIp(text)
- ipList.extend(ip)
+ ipList.update(ip)
if ip and useDns == "warn":
logSys.warning("Determined IP using DNS Lookup: %s = %s",
text, ipList)
@@ -128,54 +154,145 @@ class DNSUtils:
# try find cached own hostnames (this tuple-key cannot be used elsewhere):
key = ('self','hostname', fqdn)
name = DNSUtils.CACHE_ipToName.get(key)
+ if name is not None:
+ return name
# get it using different ways (hostname, fully-qualified or vice versa):
- if name is None:
- name = ''
- for hostname in (
- (socket.getfqdn, socket.gethostname) if fqdn else (socket.gethostname, socket.getfqdn)
- ):
- try:
- name = hostname()
- break
- except Exception as e: # pragma: no cover
- logSys.warning("Retrieving own hostnames failed: %s", e)
+ name = ''
+ for hostname in (
+ (getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
+ ):
+ try:
+ name = hostname()
+ break
+ except Exception as e: # pragma: no cover
+ logSys.warning("Retrieving own hostnames failed: %s", e)
# cache and return :
DNSUtils.CACHE_ipToName.set(key, name)
return name
+ # key find cached own hostnames (this tuple-key cannot be used elsewhere):
+ _getSelfNames_key = ('self','dns')
+
@staticmethod
def getSelfNames():
"""Get own host names of self"""
- # try find cached own hostnames (this tuple-key cannot be used elsewhere):
- key = ('self','dns')
- names = DNSUtils.CACHE_ipToName.get(key)
+ # try find cached own hostnames:
+ names = DNSUtils.CACHE_ipToName.get(DNSUtils._getSelfNames_key)
+ if names is not None:
+ return names
# get it using different ways (a set with names of localhost, hostname, fully qualified):
- if names is None:
- names = set([
- 'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
- ]) - set(['']) # getHostname can return ''
+ names = set([
+ 'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
+ ]) - set(['']) # getHostname can return ''
# cache and return :
- DNSUtils.CACHE_ipToName.set(key, names)
+ DNSUtils.CACHE_ipToName.set(DNSUtils._getSelfNames_key, names)
return names
+ # key to find cached network interfaces IPs (this tuple-key cannot be used elsewhere):
+ _getNetIntrfIPs_key = ('netintrf','ips')
+
+ @staticmethod
+ def getNetIntrfIPs():
+ """Get own IP addresses of self"""
+ # to find cached own IPs:
+ ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getNetIntrfIPs_key)
+ if ips is not None:
+ return ips
+ # try to obtain from network interfaces if possible (implemented for this platform):
+ try:
+ ips = IPAddrSet([a for ni, a in DNSUtils._NetworkInterfacesAddrs()])
+ except:
+ ips = IPAddrSet()
+ # cache and return :
+ DNSUtils.CACHE_nameToIp.set(DNSUtils._getNetIntrfIPs_key, ips)
+ return ips
+
+ # key to find cached own IPs (this tuple-key cannot be used elsewhere):
+ _getSelfIPs_key = ('self','ips')
+
@staticmethod
def getSelfIPs():
"""Get own IP addresses of self"""
- # try find cached own IPs (this tuple-key cannot be used elsewhere):
- key = ('self','ips')
- ips = DNSUtils.CACHE_nameToIp.get(key)
- # get it using different ways (a set with IPs of localhost, hostname, fully qualified):
- if ips is None:
- ips = set()
- for hostname in DNSUtils.getSelfNames():
- try:
- ips |= set(DNSUtils.textToIp(hostname, 'yes'))
- except Exception as e: # pragma: no cover
- logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
+ # to find cached own IPs:
+ ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getSelfIPs_key)
+ if ips is not None:
+ return ips
+ # firstly try to obtain from network interfaces if possible (implemented for this platform):
+ ips = IPAddrSet(DNSUtils.getNetIntrfIPs())
+ # extend it using different ways (a set with IPs of localhost, hostname, fully qualified):
+ for hostname in DNSUtils.getSelfNames():
+ try:
+ ips |= IPAddrSet(DNSUtils.dnsToIp(hostname))
+ except Exception as e: # pragma: no cover
+ logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
# cache and return :
- DNSUtils.CACHE_nameToIp.set(key, ips)
+ DNSUtils.CACHE_nameToIp.set(DNSUtils._getSelfIPs_key, ips)
return ips
+ _IPv6IsAllowed = None
+
+ @staticmethod
+ def _IPv6IsSupportedBySystem():
+ if not socket.has_ipv6:
+ return False
+ # try to check sysctl net.ipv6.conf.all.disable_ipv6:
+ try:
+ with open('/proc/sys/net/ipv6/conf/all/disable_ipv6', 'rb') as f:
+ # if 1 - disabled, 0 - enabled
+ return not int(f.read())
+ except:
+ pass
+ s = None
+ try:
+ # try to create INET6 socket:
+ s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ # bind it to free port for any interface supporting IPv6:
+ s.bind(("", 0));
+ return True
+ except Exception as e: # pragma: no cover
+ if hasattr(e, 'errno'):
+ import errno
+ # negative (-9 'Address family not supported', etc) or not available/supported:
+ if e.errno < 0 or e.errno in (errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT):
+ return False
+ # in use:
+ if e.errno in (errno.EADDRINUSE, errno.EACCES): # normally unreachable (free port and root)
+ return True
+ finally:
+ if s: s.close()
+ # unable to detect:
+ return None
+
+ @staticmethod
+ def setIPv6IsAllowed(value):
+ DNSUtils._IPv6IsAllowed = value
+ logSys.debug("IPv6 is %s", ('on' if value else 'off') if value is not None else 'auto')
+ return value
+
+ # key to find cached value of IPv6 allowance (this tuple-key cannot be used elsewhere):
+ _IPv6IsAllowed_key = ('self','ipv6-allowed')
+
+ @staticmethod
+ def IPv6IsAllowed():
+ if DNSUtils._IPv6IsAllowed is not None:
+ return DNSUtils._IPv6IsAllowed
+ v = DNSUtils.CACHE_nameToIp.get(DNSUtils._IPv6IsAllowed_key)
+ if v is not None:
+ return v
+ v = DNSUtils._IPv6IsSupportedBySystem()
+ if v is None:
+ # detect by IPs of host:
+ ips = DNSUtils.getNetIntrfIPs()
+ if not ips:
+ DNSUtils._IPv6IsAllowed = True; # avoid self recursion from getSelfIPs -> dnsToIp -> IPv6IsAllowed
+ try:
+ ips = DNSUtils.getSelfIPs()
+ finally:
+ DNSUtils._IPv6IsAllowed = None
+ v = any((':' in ip.ntoa) for ip in ips)
+ DNSUtils.CACHE_nameToIp.set(DNSUtils._IPv6IsAllowed_key, v)
+ return v
+
##
# Class for IP address handling.
@@ -197,14 +314,23 @@ class IPAddr(object):
__slots__ = '_family','_addr','_plen','_maskplen','_raw'
# todo: make configurable the expired time and max count of cache entries:
- CACHE_OBJ = Utils.Cache(maxCount=1000, maxTime=5*60)
+ CACHE_OBJ = Utils.Cache(maxCount=10000, maxTime=5*60)
CIDR_RAW = -2
CIDR_UNSPEC = -1
FAM_IPv4 = CIDR_RAW - socket.AF_INET
FAM_IPv6 = CIDR_RAW - socket.AF_INET6
+ @staticmethod
+ def _AF2FAM(v):
+ return IPAddr.CIDR_RAW - v
def __new__(cls, ipstr, cidr=CIDR_UNSPEC):
+ if cidr == IPAddr.CIDR_UNSPEC and isinstance(ipstr, (tuple, list)):
+ cidr = IPAddr.CIDR_RAW
+ if cidr == IPAddr.CIDR_RAW: # don't cache raw
+ ip = super(IPAddr, cls).__new__(cls)
+ ip.__init(ipstr, cidr)
+ return ip
# check already cached as IPAddr
args = (ipstr, cidr)
ip = IPAddr.CACHE_OBJ.get(args)
@@ -221,7 +347,8 @@ class IPAddr(object):
return ip
ip = super(IPAddr, cls).__new__(cls)
ip.__init(ipstr, cidr)
- IPAddr.CACHE_OBJ.set(args, ip)
+ if ip._family != IPAddr.CIDR_RAW:
+ IPAddr.CACHE_OBJ.set(args, ip)
return ip
@staticmethod
@@ -301,7 +428,7 @@ class IPAddr(object):
return repr(self.ntoa)
def __str__(self):
- return self.ntoa
+ return self.ntoa if isinstance(self.ntoa, basestring) else str(self.ntoa)
def __reduce__(self):
"""IPAddr pickle-handler, that simply wraps IPAddr to the str
@@ -343,6 +470,12 @@ class IPAddr(object):
"""
return self._family != socket.AF_UNSPEC
+ @property
+ def isSingle(self):
+ """Returns whether the object is a single IP address (not DNS and subnet)
+ """
+ return self._plen == {socket.AF_INET: 32, socket.AF_INET6: 128}.get(self._family, -1000)
+
def __eq__(self, other):
if self._family == IPAddr.CIDR_RAW and not isinstance(other, IPAddr):
return self._raw == other
@@ -475,6 +608,14 @@ class IPAddr(object):
return (self.addr & mask) == net.addr
+ def contains(self, ip):
+ """Return whether the object (as network) contains given IP
+ """
+ return isinstance(ip, IPAddr) and (ip == self or ip.isInNet(self))
+
+ def __contains__(self, ip):
+ return self.contains(ip)
+
# Pre-calculated map: addr to maskplen
def __getMaskMap():
m6 = (1 << 128)-1
@@ -524,3 +665,135 @@ class IPAddr(object):
# An IPv4 compatible IPv6 to be reused
IPAddr.IP6_4COMPAT = IPAddr("::ffff:0:0", 96)
+
+
+class IPAddrSet(set):
+
+ hasSubNet = False
+
+ def __init__(self, ips=[]):
+ ips2 = set()
+ for ip in ips:
+ if not isinstance(ip, IPAddr): ip = IPAddr(ip)
+ ips2.add(ip)
+ self.hasSubNet |= not ip.isSingle
+ set.__init__(self, ips2)
+
+ def add(self, ip):
+ if not isinstance(ip, IPAddr): ip = IPAddr(ip)
+ self.hasSubNet |= not ip.isSingle
+ set.add(self, ip)
+
+ def __contains__(self, ip):
+ if not isinstance(ip, IPAddr): ip = IPAddr(ip)
+ # IP can be found directly or IP is in each subnet:
+ return set.__contains__(self, ip) or (self.hasSubNet and any(n.contains(ip) for n in self))
+
+
+def _NetworkInterfacesAddrs(withMask=False):
+
+ # Closure implementing lazy load modules and libc and define _NetworkInterfacesAddrs on demand:
+ # Currently tested on Linux only (TODO: implement for MacOS, Solaris, etc)
+ try:
+ from ctypes import (
+ Structure, Union, POINTER,
+ pointer, get_errno, cast,
+ c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint16, c_uint32
+ )
+ import ctypes.util
+ import ctypes
+
+ class struct_sockaddr(Structure):
+ _fields_ = [
+ ('sa_family', c_ushort),
+ ('sa_data', c_byte * 14),]
+
+ class struct_sockaddr_in(Structure):
+ _fields_ = [
+ ('sin_family', c_ushort),
+ ('sin_port', c_uint16),
+ ('sin_addr', c_byte * 4)]
+
+ class struct_sockaddr_in6(Structure):
+ _fields_ = [
+ ('sin6_family', c_ushort),
+ ('sin6_port', c_uint16),
+ ('sin6_flowinfo', c_uint32),
+ ('sin6_addr', c_byte * 16),
+ ('sin6_scope_id', c_uint32)]
+
+ class union_ifa_ifu(Union):
+ _fields_ = [
+ ('ifu_broadaddr', POINTER(struct_sockaddr)),
+ ('ifu_dstaddr', POINTER(struct_sockaddr)),]
+
+ class struct_ifaddrs(Structure):
+ pass
+ struct_ifaddrs._fields_ = [
+ ('ifa_next', POINTER(struct_ifaddrs)),
+ ('ifa_name', c_char_p),
+ ('ifa_flags', c_uint),
+ ('ifa_addr', POINTER(struct_sockaddr)),
+ ('ifa_netmask', POINTER(struct_sockaddr)),
+ ('ifa_ifu', union_ifa_ifu),
+ ('ifa_data', c_void_p),]
+
+ libc = ctypes.CDLL(ctypes.util.find_library('c') or "")
+ if not libc.getifaddrs: # pragma: no cover
+ raise NotImplementedError('libc.getifaddrs is not available')
+
+ def ifap_iter(ifap):
+ ifa = ifap.contents
+ while True:
+ yield ifa
+ if not ifa.ifa_next:
+ break
+ ifa = ifa.ifa_next.contents
+
+ def getfamaddr(ifa, withMask=False):
+ sa = ifa.ifa_addr.contents
+ fam = sa.sa_family
+ if fam == socket.AF_INET:
+ sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
+ addr = socket.inet_ntop(fam, sa.sin_addr)
+ if withMask:
+ nm = ifa.ifa_netmask.contents
+ if nm is not None and nm.sa_family == socket.AF_INET:
+ nm = cast(pointer(nm), POINTER(struct_sockaddr_in)).contents
+ addr += '/'+socket.inet_ntop(fam, nm.sin_addr)
+ return IPAddr(addr)
+ elif fam == socket.AF_INET6:
+ sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
+ addr = socket.inet_ntop(fam, sa.sin6_addr)
+ if withMask:
+ nm = ifa.ifa_netmask.contents
+ if nm is not None and nm.sa_family == socket.AF_INET6:
+ nm = cast(pointer(nm), POINTER(struct_sockaddr_in6)).contents
+ addr += '/'+socket.inet_ntop(fam, nm.sin6_addr)
+ return IPAddr(addr)
+ return None
+
+ def _NetworkInterfacesAddrs(withMask=False):
+ ifap = POINTER(struct_ifaddrs)()
+ result = libc.getifaddrs(pointer(ifap))
+ if result != 0:
+ raise OSError(get_errno())
+ del result
+ try:
+ for ifa in ifap_iter(ifap):
+ name = ifa.ifa_name.decode("UTF-8")
+ addr = getfamaddr(ifa, withMask)
+ if addr:
+ yield name, addr
+ finally:
+ libc.freeifaddrs(ifap)
+
+ except Exception as e: # pragma: no cover
+ _init_error = NotImplementedError(e)
+ def _NetworkInterfacesAddrs():
+ raise _init_error
+
+ DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
+ return _NetworkInterfacesAddrs(withMask)
+
+DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
diff --git a/fail2ban/server/jail.py b/fail2ban/server/jail.py
index c6d61856..2c84e475 100644
--- a/fail2ban/server/jail.py
+++ b/fail2ban/server/jail.py
@@ -161,6 +161,10 @@ class Jail(object):
"""
return self.__db
+ @database.setter
+ def database(self, value):
+ self.__db = value;
+
@property
def filter(self):
"""The filter which the jail is using to monitor log files.
@@ -192,6 +196,12 @@ class Jail(object):
("Actions", self.actions.status(flavor=flavor)),
]
+ @property
+ def hasFailTickets(self):
+ """Retrieve whether queue has tickets to ban.
+ """
+ return not self.__queue.empty()
+
def putFailTicket(self, ticket):
"""Add a fail ticket to the jail.
@@ -281,11 +291,11 @@ class Jail(object):
# use ban time as search time if we have not enabled a increasing:
forbantime = self.actions.getBanTime()
for ticket in self.database.getCurrentBans(jail=self, forbantime=forbantime,
- correctBanTime=correctBanTime
+ correctBanTime=correctBanTime, maxmatches=self.filter.failManager.maxMatches
):
try:
#logSys.debug('restored ticket: %s', ticket)
- if self.filter.inIgnoreIPList(ticket.getIP(), log_ignore=True): continue
+ if self.filter.inIgnoreIPList(ticket.getID(), log_ignore=True): continue
# mark ticked was restored from database - does not put it again into db:
ticket.restored = True
# correct start time / ban time (by the same end of ban):
diff --git a/fail2ban/server/jails.py b/fail2ban/server/jails.py
index 972a8c4b..eaaa9518 100644
--- a/fail2ban/server/jails.py
+++ b/fail2ban/server/jails.py
@@ -22,7 +22,10 @@ __copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2013- Yaroslav Halchenko"
__license__ = "GPL"
from threading import Lock
-from collections import Mapping
+try:
+ from collections.abc import Mapping
+except ImportError:
+ from collections import Mapping
from ..exceptions import DuplicateJailException, UnknownJailException
from .jail import Jail
@@ -64,8 +67,7 @@ class Jails(Mapping):
"""
with self.__lock:
if name in self._jails:
- if noduplicates:
- raise DuplicateJailException(name)
+ raise DuplicateJailException(name)
else:
self._jails[name] = Jail(name, backend, db)
diff --git a/fail2ban/server/jailthread.py b/fail2ban/server/jailthread.py
index 5c7afd38..67955a06 100644
--- a/fail2ban/server/jailthread.py
+++ b/fail2ban/server/jailthread.py
@@ -29,7 +29,7 @@ from threading import Thread
from abc import abstractmethod
from .utils import Utils
-from ..helpers import excepthook
+from ..helpers import excepthook, prctl_set_th_name
class JailThread(Thread):
@@ -67,6 +67,8 @@ class JailThread(Thread):
def run_with_except_hook(*args, **kwargs):
try:
run(*args, **kwargs)
+ # call on stop callback to do some finalizations:
+ self.onStop()
except Exception as e:
# avoid very sporadic error "'NoneType' object has no attribute 'exc_info'" (https://bugs.python.org/issue7336)
# only extremely fast systems are affected ATM (2.7 / 3.x), if thread ends nothing is available here.
@@ -76,6 +78,15 @@ class JailThread(Thread):
print(e)
self.run = run_with_except_hook
+ if sys.version_info >= (3,): # pragma: 2.x no cover
+ def _bootstrap(self):
+ prctl_set_th_name(self.name)
+ return super(JailThread, self)._bootstrap();
+ else: # pragma: 3.x no cover
+ def __bootstrap(self):
+ prctl_set_th_name(self.name)
+ return Thread._Thread__bootstrap(self)
+
@abstractmethod
def status(self, flavor="basic"): # pragma: no cover - abstract
"""Abstract - Should provide status information.
@@ -88,6 +99,12 @@ class JailThread(Thread):
self.active = True
super(JailThread, self).start()
+ @abstractmethod
+	def onStop(self): # pragma: no cover - abstract
+ """Abstract - Called when thread ends (after run).
+ """
+ pass
+
def stop(self):
"""Sets `active` property to False, to flag run method to return.
"""
@@ -108,4 +125,9 @@ class JailThread(Thread):
if self.active is not None:
super(JailThread, self).join()
-
+## python 2.x replace binding of private __bootstrap method:
+if sys.version_info < (3,): # pragma: 3.x no cover
+ JailThread._Thread__bootstrap = JailThread._JailThread__bootstrap
+## python 3.9, restore isAlive method:
+elif not hasattr(JailThread, 'isAlive'): # pragma: 2.x no cover
+ JailThread.isAlive = JailThread.is_alive
diff --git a/fail2ban/server/mytime.py b/fail2ban/server/mytime.py
index 49199887..315d8a30 100644
--- a/fail2ban/server/mytime.py
+++ b/fail2ban/server/mytime.py
@@ -113,6 +113,19 @@ class MyTime:
return time.localtime(x)
else:
return time.localtime(MyTime.myTime)
+
+ @staticmethod
+ def time2str(unixTime, format="%Y-%m-%d %H:%M:%S"):
+ """Convert time to a string representing as date and time using given format.
+ Default format is ISO 8601, YYYY-MM-DD HH:MM:SS without microseconds.
+
+ @return ISO-capable string representation of given unixTime
+ """
+ # consider end of 9999th year (in GMT+23 to avoid year overflow in other TZ)
+ dt = datetime.datetime.fromtimestamp(
+ unixTime).replace(microsecond=0
+ ) if unixTime < 253402214400 else datetime.datetime(9999, 12, 31, 23, 59, 59)
+ return dt.strftime(format)
## precreate/precompile primitives used in str2seconds:
@@ -161,3 +174,62 @@ class MyTime:
val = rexp.sub(rpl, val)
val = MyTime._str2sec_fini.sub(r"\1+\2", val)
return eval(val)
+
+ class seconds2str():
+ """Converts seconds to string on demand (if string representation needed).
+ Ex: seconds2str(86400*390) = 1y 3w 4d
+ seconds2str(86400*368) = 1y 3d
+ seconds2str(86400*365.5) = 1y
+ seconds2str(86400*2+3600*7+60*15) = 2d 7h 15m
+ seconds2str(86400*2+3599) = 2d 1h
+ seconds2str(3600-5) = 1h
+ seconds2str(3600-10) = 59m 50s
+ seconds2str(59) = 59s
+ """
+ def __init__(self, sec):
+ self.sec = sec
+ def __str__(self):
+ # s = str(datetime.timedelta(seconds=int(self.sec)))
+ # return s if s[-3:] != ":00" else s[:-3]
+ s = self.sec; c = 3
+ # automatic accuracy: round by large values (and maximally 3 groups)
+ if s >= 31536000: # a year as 365*24*60*60 (don't need to consider leap year by this accuracy)
+ s = int(round(float(s)/86400)) # round by a day
+ r = str(s//365) + 'y '; s %= 365
+ if s >= 7:
+ r += str(s//7) + 'w '; s %= 7
+ if s:
+ r += str(s) + 'd '
+ return r[:-1]
+ if s >= 604800: # a week as 24*60*60*7
+ s = int(round(float(s)/3600)) # round by a hour
+ r = str(s//168) + 'w '; s %= 168
+ if s >= 24:
+ r += str(s//24) + 'd '; s %= 24
+ if s:
+ r += str(s) + 'h '
+ return r[:-1]
+ if s >= 86400: # a day as 24*60*60
+ s = int(round(float(s)/60)) # round by a minute
+ r = str(s//1440) + 'd '; s %= 1440
+ if s >= 60:
+ r += str(s//60) + 'h '; s %= 60
+ if s:
+ r += str(s) + 'm '
+ return r[:-1]
+ if s >= 3595: # a hour as 60*60 (- 5 seconds)
+ s = int(round(float(s)/10)) # round by 10 seconds
+ r = str(s//360) + 'h '; s %= 360
+ if s >= 6: # a minute
+ r += str(s//6) + 'm '; s %= 6
+ return r[:-1]
+ r = ''
+ if s >= 60: # a minute
+ r += str(s//60) + 'm '; s %= 60
+ if s: # remaining seconds
+ r += str(s) + 's '
+ elif not self.sec: # 0s
+ r = '0 '
+ return r[:-1]
+ def __repr__(self):
+ return self.__str__()
diff --git a/fail2ban/server/observer.py b/fail2ban/server/observer.py
index c3fa7d54..b1c9b37d 100644
--- a/fail2ban/server/observer.py
+++ b/fail2ban/server/observer.py
@@ -62,7 +62,7 @@ class ObserverThread(JailThread):
def __init__(self):
# init thread
- super(ObserverThread, self).__init__(name='Observer')
+ super(ObserverThread, self).__init__(name='f2b/observer')
# before started - idle:
self.idle = True
## Event queue
@@ -87,7 +87,7 @@ class ObserverThread(JailThread):
except KeyError:
raise KeyError("Invalid event index : %s" % i)
- def __delitem__(self, name):
+ def __delitem__(self, i):
try:
del self._queue[i]
except KeyError:
@@ -146,9 +146,11 @@ class ObserverThread(JailThread):
def pulse_notify(self):
"""Notify wakeup (sets /and resets/ notify event)
"""
- if not self._paused and self._notify:
- self._notify.set()
- #self._notify.clear()
+ if not self._paused:
+ n = self._notify
+ if n:
+ n.set()
+ #n.clear()
def add(self, *event):
"""Add a event to queue and notify thread to wake up.
@@ -230,13 +232,14 @@ class ObserverThread(JailThread):
if self._paused:
continue
else:
- ## notify event deleted (shutdown) - just sleep a litle bit (waiting for shutdown events, prevent high cpu usage)
+ ## notify event deleted (shutdown) - just sleep a little bit (waiting for shutdown events, prevent high cpu usage)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
## stop by shutdown and empty queue :
if not self.is_full:
break
## end of main loop - exit
logSys.info("Observer stopped, %s events remaining.", len(self._queue))
+ self._notify = None
#print("Observer stopped, %s events remaining." % len(self._queue))
except Exception as e:
logSys.error('Observer stopped after error: %s', e, exc_info=True)
@@ -262,9 +265,8 @@ class ObserverThread(JailThread):
if not self.active:
super(ObserverThread, self).start()
- def stop(self):
+ def stop(self, wtime=5, forceQuit=True):
if self.active and self._notify:
- wtime = 5
logSys.info("Observer stop ... try to end queue %s seconds", wtime)
#print("Observer stop ....")
# just add shutdown job to make possible wait later until full (events remaining)
@@ -276,10 +278,15 @@ class ObserverThread(JailThread):
#self.pulse_notify()
self._notify = None
# wait max wtime seconds until full (events remaining)
- self.wait_empty(wtime)
- n.clear()
- self.active = False
- self.wait_idle(0.5)
+ if self.wait_empty(wtime) or forceQuit:
+ n.clear()
+ self.active = False; # leave outer (active) loop
+ self._paused = True; # leave inner (queue) loop
+ self.__db = None
+ else:
+ self._notify = n
+ return self.wait_idle(min(wtime, 0.5)) and not self.is_full
+ return True
@property
def is_full(self):
@@ -357,15 +364,15 @@ class ObserverThread(JailThread):
## [Async] ban time increment functionality ...
## -----------------------------------------
- def failureFound(self, failManager, jail, ticket):
+ def failureFound(self, jail, ticket):
""" Notify observer a failure for ip was found
Observer will check ip was known (bad) and possibly increase a retry count
"""
# check jail active :
- if not jail.isAlive():
+ if not jail.isAlive() or not jail.getBanTimeExtra("increment"):
return
- ip = ticket.getIP()
+ ip = ticket.getID()
unixTime = ticket.getTime()
logSys.debug("[%s] Observer: failure found %s", jail.name, ip)
# increase retry count for known (bad) ip, corresponding banCount of it (one try will count as 2, 3, 5, 9 ...) :
@@ -373,7 +380,7 @@ class ObserverThread(JailThread):
retryCount = 1
timeOfBan = None
try:
- maxRetry = failManager.getMaxRetry()
+ maxRetry = jail.filter.failManager.getMaxRetry()
db = jail.database
if db is not None:
for banCount, timeOfBan, lastBanTime in db.getBan(ip, jail):
@@ -393,21 +400,15 @@ class ObserverThread(JailThread):
return
# retry counter was increased - add it again:
logSys.info("[%s] Found %s, bad - %s, %s # -> %s%s", jail.name, ip,
- datetime.datetime.fromtimestamp(unixTime).strftime("%Y-%m-%d %H:%M:%S"), banCount, retryCount,
+ MyTime.time2str(unixTime), banCount, retryCount,
(', Ban' if retryCount >= maxRetry else ''))
# retryCount-1, because a ticket was already once incremented by filter self
- retryCount = failManager.addFailure(ticket, retryCount - 1, True)
+ retryCount = jail.filter.failManager.addFailure(ticket, retryCount - 1, True)
ticket.setBanCount(banCount)
# after observe we have increased attempt count, compare it >= maxretry ...
if retryCount >= maxRetry:
# perform the banning of the IP now (again)
- # [todo]: this code part will be used multiple times - optimize it later.
- try: # pragma: no branch - exception is the only way out
- while True:
- ticket = failManager.toBan(ip)
- jail.putFailTicket(ticket)
- except FailManagerEmpty:
- failManager.cleanup(MyTime.time())
+ jail.filter.performBan(ip)
except Exception as e:
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
@@ -434,7 +435,7 @@ class ObserverThread(JailThread):
if not jail.isAlive() or not jail.database:
return banTime
be = jail.getBanTimeExtra()
- ip = ticket.getIP()
+ ip = ticket.getID()
orgBanTime = banTime
# check ip was already banned (increment time of ban):
try:
@@ -454,8 +455,8 @@ class ObserverThread(JailThread):
# check current ticket time to prevent increasing for twice read tickets (restored from log file besides database after restart)
if ticket.getTime() > timeOfBan:
logSys.info('[%s] IP %s is bad: %s # last %s - incr %s to %s' % (jail.name, ip, banCount,
- datetime.datetime.fromtimestamp(timeOfBan).strftime("%Y-%m-%d %H:%M:%S"),
- datetime.timedelta(seconds=int(orgBanTime)), datetime.timedelta(seconds=int(banTime))));
+ MyTime.time2str(timeOfBan),
+ MyTime.seconds2str(orgBanTime), MyTime.seconds2str(banTime)))
else:
ticket.restored = True
break
@@ -473,7 +474,7 @@ class ObserverThread(JailThread):
return
try:
oldbtime = btime
- ip = ticket.getIP()
+ ip = ticket.getID()
logSys.debug("[%s] Observer: ban found %s, %s", jail.name, ip, btime)
# if not permanent and ban time was not set - check time should be increased:
if btime != -1 and ticket.getBanTime() is None:
@@ -484,8 +485,7 @@ class ObserverThread(JailThread):
# if not permanent
if btime != -1:
bendtime = ticket.getTime() + btime
- logtime = (datetime.timedelta(seconds=int(btime)),
- datetime.datetime.fromtimestamp(bendtime).strftime("%Y-%m-%d %H:%M:%S"))
+ logtime = (MyTime.seconds2str(btime), MyTime.time2str(bendtime))
# check ban is not too old :
if bendtime < MyTime.time():
logSys.debug('Ignore old bantime %s', logtime[1])
@@ -514,7 +514,7 @@ class ObserverThread(JailThread):
"""
try:
btime = ticket.getBanTime()
- ip = ticket.getIP()
+ ip = ticket.getID()
logSys.debug("[%s] Observer: prolong %s, %s", jail.name, ip, btime)
# prolong ticket via actions that expected this:
jail.actions._prolongBan(ticket)
diff --git a/fail2ban/server/server.py b/fail2ban/server/server.py
index dfbbd5d7..660f7918 100644
--- a/fail2ban/server/server.py
+++ b/fail2ban/server/server.py
@@ -34,12 +34,12 @@ import sys
from .observer import Observers, ObserverThread
from .jails import Jails
-from .filter import FileFilter, JournalFilter
+from .filter import DNSUtils, FileFilter, JournalFilter
from .transmitter import Transmitter
from .asyncserver import AsyncServer, AsyncServerException
from .. import version
from ..helpers import getLogger, _as_bool, extractOptions, str2LogLevel, \
- getVerbosityFormat, excepthook
+ getVerbosityFormat, excepthook, prctl_set_th_name
# Gets the instance of the logger.
logSys = getLogger(__name__)
@@ -58,6 +58,23 @@ except ImportError: # pragma: no cover
def _thread_name():
return threading.current_thread().__class__.__name__
+try:
+ FileExistsError
+except NameError: # pragma: 3.x no cover
+ FileExistsError = OSError
+
+def _make_file_path(name):
+ """Creates path of file (last level only) on demand"""
+ name = os.path.dirname(name)
+ # only if it is absolute (e. g. important for socket, so if unix path):
+ if os.path.isabs(name):
+ # be sure path exists (create last level of directory on demand):
+ try:
+ os.mkdir(name)
+ except (OSError, FileExistsError) as e:
+ if e.errno != 17: # pragma: no cover - not EEXIST is not covered
+ raise
+
class Server:
@@ -97,7 +114,7 @@ class Server:
def start(self, sock, pidfile, force=False, observer=True, conf={}):
# First set the mask to only allow access to owner
- os.umask(0077)
+ os.umask(0o077)
# Second daemonize before logging etc, because it will close all handles:
if self.__daemon: # pragma: no cover
logSys.info("Starting in daemon mode")
@@ -111,6 +128,9 @@ class Server:
logSys.error(err)
raise ServerInitializationError(err)
# We are daemon.
+
+ # replace main thread (and process) name to identify server (for top/ps/pstree or diagnostic):
+ prctl_set_th_name(conf.get("pname", "fail2ban-server"))
# Set all logging parameters (or use default if not specified):
self.__verbose = conf.get("verbose", None)
@@ -139,6 +159,7 @@ class Server:
# Creates a PID file.
try:
logSys.debug("Creating PID file %s", pidfile)
+ _make_file_path(pidfile)
pidFile = open(pidfile, 'w')
pidFile.write("%s\n" % os.getpid())
pidFile.close()
@@ -154,6 +175,7 @@ class Server:
# Start the communication
logSys.debug("Starting communication")
try:
+ _make_file_path(sock)
self.__asyncServer = AsyncServer(self.__transm)
self.__asyncServer.onstart = conf.get('onstart')
self.__asyncServer.start(sock, force)
@@ -191,23 +213,26 @@ class Server:
signal.signal(s, sh)
# Give observer a small chance to complete its work before exit
- if Observers.Main is not None:
- Observers.Main.stop()
+ obsMain = Observers.Main
+ if obsMain is not None:
+ if obsMain.stop(forceQuit=False):
+ obsMain = None
+ Observers.Main = None
# Now stop all the jails
self.stopAllJail()
+ # Stop observer ultimately
+ if obsMain is not None:
+ obsMain.stop()
+
# Explicit close database (server can leave in a thread,
# so delayed GC can prevent committing changes)
if self.__db:
self.__db.close()
self.__db = None
- # Stop observer and exit
- if Observers.Main is not None:
- Observers.Main.stop()
- Observers.Main = None
- # Stop async
+ # Stop async and exit
if self.__asyncServer is not None:
self.__asyncServer.stop()
self.__asyncServer = None
@@ -268,6 +293,11 @@ class Server:
for name in self.__jails.keys():
self.delJail(name, stop=False, join=True)
+ def clearCaches(self):
+ # we need to clear caches, to be able to recognize new IPs/families etc:
+ DNSUtils.CACHE_nameToIp.clear()
+ DNSUtils.CACHE_ipToName.clear()
+
def reloadJails(self, name, opts, begin):
if begin:
# begin reload:
@@ -289,6 +319,8 @@ class Server:
if "--restart" in opts:
self.stopJail(name)
else:
+ # invalidate caches by reload
+ self.clearCaches()
# first unban all ips (will be not restored after (re)start):
if "--unban" in opts:
self.setUnbanIP()
@@ -359,7 +391,7 @@ class Server:
if isinstance(filter_, FileFilter):
return filter_.getLogPaths()
else: # pragma: systemd no cover
- logSys.info("Jail %s is not a FileFilter instance" % name)
+ logSys.debug("Jail %s is not a FileFilter instance" % name)
return []
def addJournalMatch(self, name, match): # pragma: systemd no cover
@@ -377,7 +409,7 @@ class Server:
if isinstance(filter_, JournalFilter):
return filter_.getJournalMatch()
else:
- logSys.info("Jail %s is not a JournalFilter instance" % name)
+ logSys.debug("Jail %s is not a JournalFilter instance" % name)
return []
def setLogEncoding(self, name, encoding):
@@ -459,6 +491,12 @@ class Server:
def getUseDns(self, name):
return self.__jails[name].filter.getUseDns()
+ def setMaxMatches(self, name, value):
+ self.__jails[name].filter.failManager.maxMatches = value
+
+ def getMaxMatches(self, name):
+ return self.__jails[name].filter.failManager.maxMatches
+
def setMaxRetry(self, name, value):
self.__jails[name].filter.setMaxRetry(value)
@@ -489,27 +527,70 @@ class Server:
def setBanTime(self, name, value):
self.__jails[name].actions.setBanTime(value)
+ def addAttemptIP(self, name, *args):
+ return self.__jails[name].filter.addAttempt(*args)
+
def setBanIP(self, name, value):
- return self.__jails[name].filter.addBannedIP(value)
-
- def setUnbanIP(self, name=None, value=None):
+ return self.__jails[name].actions.addBannedIP(value)
+
+ def setUnbanIP(self, name=None, value=None, ifexists=True):
if name is not None:
- # in all jails:
+ # single jail:
jails = [self.__jails[name]]
else:
- # single jail:
+ # in all jails:
jails = self.__jails.values()
# unban given or all (if value is None):
cnt = 0
+ ifexists |= (name is None)
for jail in jails:
- cnt += jail.actions.removeBannedIP(value, ifexists=(name is None))
- if value and not cnt:
- logSys.info("%s is not banned", value)
+ cnt += jail.actions.removeBannedIP(value, ifexists=ifexists)
return cnt
+ def banned(self, name=None, ids=None):
+ if name is not None:
+ # single jail:
+ jails = [self.__jails[name]]
+ else:
+ # in all jails:
+ jails = self.__jails.values()
+ # check banned ids:
+ res = []
+ if name is None and ids:
+ for ip in ids:
+ ret = []
+ for jail in jails:
+ if jail.actions.getBanned([ip]):
+ ret.append(jail.name)
+ res.append(ret)
+ else:
+ for jail in jails:
+ ret = jail.actions.getBanned(ids)
+ if name is not None:
+ return ret
+ res.append(ret)
+ else:
+ res.append({jail.name: ret})
+ return res
+
def getBanTime(self, name):
return self.__jails[name].actions.getBanTime()
+ def getBanList(self, name, withTime=False):
+ """Returns the list of banned IP addresses for a jail.
+
+ Parameters
+ ----------
+ name : str
+ The name of a jail.
+
+ Returns
+ -------
+ list
+ The list of banned IP addresses.
+ """
+ return self.__jails[name].actions.getBanList(withTime)
+
def setBanTimeExtra(self, name, opt, value):
self.__jails[name].setBanTimeExtra(opt, value)
@@ -597,7 +678,10 @@ class Server:
return True
padding = logOptions.get('padding')
# set a format which is simpler for console use
- if systarget == "SYSLOG":
+ if systarget == "SYSTEMD-JOURNAL":
+ from systemd.journal import JournalHandler
+ hdlr = JournalHandler(SYSLOG_IDENTIFIER='fail2ban')
+ elif systarget == "SYSLOG":
facility = logOptions.get('facility', 'DAEMON').upper()
# backwards compatibility - default no padding for syslog handler:
if padding is None: padding = '0'
@@ -647,9 +731,7 @@ class Server:
except (ValueError, KeyError): # pragma: no cover
# Is known to be thrown after logging was shutdown once
# with older Pythons -- seems to be safe to ignore there
- # At least it was still failing on 2.6.2-0ubuntu1 (jaunty)
- if (2, 6, 3) <= sys.version_info < (3,) or \
- (3, 2) <= sys.version_info:
+ if sys.version_info < (3,) or sys.version_info >= (3, 2):
raise
# detailed format by deep log levels (as DEBUG=10):
if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
@@ -675,7 +757,8 @@ class Server:
verbose = self.__verbose-1
fmt = getVerbosityFormat(verbose, addtime=addtime, padding=padding)
# tell the handler to use this format
- hdlr.setFormatter(logging.Formatter(fmt))
+ if target != "SYSTEMD-JOURNAL":
+ hdlr.setFormatter(logging.Formatter(fmt))
logger.addHandler(hdlr)
# Does not display this message at startup.
if self.__logTarget is not None:
@@ -714,7 +797,7 @@ class Server:
return self.__syslogSocket
def flushLogs(self):
- if self.__logTarget not in ['STDERR', 'STDOUT', 'SYSLOG']:
+ if self.__logTarget not in ['STDERR', 'STDOUT', 'SYSLOG', 'SYSTEMD-JOURNAL']:
for handler in getLogger("fail2ban").handlers:
try:
handler.doRollover()
@@ -729,6 +812,21 @@ class Server:
logSys.info("flush performed on %s" % self.__logTarget)
return "flushed"
+ @staticmethod
+ def setIPv6IsAllowed(value):
+ value = _as_bool(value) if value != 'auto' else None
+ return DNSUtils.setIPv6IsAllowed(value)
+
+ def setThreadOptions(self, value):
+ for o, v in value.iteritems():
+ if o == 'stacksize':
+ threading.stack_size(int(v)*1024)
+ else: # pragma: no cover
+ raise KeyError("unknown option %r" % o)
+
+ def getThreadOptions(self):
+ return {'stacksize': threading.stack_size() // 1024}
+
def setDatabase(self, filename):
# if not changed - nothing to do
if self.__db and self.__db.filename == filename:
@@ -742,6 +840,7 @@ class Server:
self.__db = None
else:
if Fail2BanDb is not None:
+ _make_file_path(filename)
self.__db = Fail2BanDb(filename)
self.__db.delAllJails()
else: # pragma: no cover
@@ -754,6 +853,26 @@ class Server:
def getDatabase(self):
return self.__db
+ @staticmethod
+ def __get_fdlist():
+ """Generate a list of open file descriptors.
+
+ This wouldn't work on some platforms, if proc/fdescfs is not mounted, or in a chroot environment,
+ then it'd raise a FileExistsError.
+ """
+ for path in (
+ '/proc/self/fd', # Linux, Cygwin and NetBSD
+ '/proc/fd', # MacOS and FreeBSD
+ ):
+ if os.path.exists(path):
+ def fdlist():
+ for name in os.listdir(path):
+ if name.isdigit():
+ yield int(name)
+ return fdlist()
+ # other platform or unmounted, chroot etc:
+ raise FileExistsError("fd-list not found")
+
def __createDaemon(self): # pragma: no cover
""" Detach a process from the controlling terminal and run it in the
background as a daemon.
@@ -811,25 +930,37 @@ class Server:
# Signal to exit, parent of the first child.
return None
- # Close all open files. Try the system configuration variable, SC_OPEN_MAX,
+ # Close all open files. Try to obtain the range of open descriptors directly.
+ # As a fallback try the system configuration variable, SC_OPEN_MAX,
# for the maximum number of open files to close. If it doesn't exist, use
# the default value (configurable).
try:
- maxfd = os.sysconf("SC_OPEN_MAX")
- except (AttributeError, ValueError):
- maxfd = 256 # default maximum
+ fdlist = self.__get_fdlist()
+ maxfd = -1
+ except:
+ try:
+ maxfd = os.sysconf("SC_OPEN_MAX")
+ except (AttributeError, ValueError):
+ maxfd = 256 # default maximum
+ fdlist = xrange(maxfd+1)
# urandom should not be closed in Python 3.4.0. Fixed in 3.4.1
# http://bugs.python.org/issue21207
if sys.version_info[0:3] == (3, 4, 0): # pragma: no cover
urandom_fd = os.open("/dev/urandom", os.O_RDONLY)
- for fd in range(0, maxfd):
+ for fd in fdlist:
try:
if not os.path.sameopenfile(urandom_fd, fd):
os.close(fd)
except OSError: # ERROR (ignore)
pass
os.close(urandom_fd)
+ elif maxfd == -1:
+ for fd in fdlist:
+ try:
+ os.close(fd)
+ except OSError: # ERROR (ignore)
+ pass
else:
os.closerange(0, maxfd)
diff --git a/fail2ban/server/strptime.py b/fail2ban/server/strptime.py
index 498d284b..12be163a 100644
--- a/fail2ban/server/strptime.py
+++ b/fail2ban/server/strptime.py
@@ -30,17 +30,6 @@ locale_time = LocaleTime()
TZ_ABBR_RE = r"[A-Z](?:[A-Z]{2,4})?"
FIXED_OFFSET_TZ_RE = re.compile(r"(%s)?([+-][01]\d(?::?\d{2})?)?$" % (TZ_ABBR_RE,))
-def _getYearCentRE(cent=(0,3), distance=3, now=(MyTime.now(), MyTime.alternateNow)):
- """ Build century regex for last year and the next years (distance).
-
- Thereby respect possible run in the test-cases (alternate date used there)
- """
- cent = lambda year, f=cent[0], t=cent[1]: str(year)[f:t]
- exprset = set( cent(now[0].year + i) for i in (-1, distance) )
- if len(now) and now[1]:
- exprset |= set( cent(now[1].year + i) for i in (-1, distance) )
- return "(?:%s)" % "|".join(exprset) if len(exprset) > 1 else "".join(exprset)
-
timeRE = TimeRE()
# %k - one- or two-digit number giving the hour of the day (0-23) on a 24-hour clock,
@@ -63,20 +52,68 @@ timeRE['z'] = r"(?P<z>Z|UTC|GMT|[+-][01]\d(?::?\d{2})?)"
timeRE['ExZ'] = r"(?P<Z>%s)" % (TZ_ABBR_RE,)
timeRE['Exz'] = r"(?P<z>(?:%s)?[+-][01]\d(?::?\d{2})?|%s)" % (TZ_ABBR_RE, TZ_ABBR_RE)
+# overwrite default patterns, since they can be non-optimal:
+timeRE['d'] = r"(?P<d>[1-2]\d|[0 ]?[1-9]|3[0-1])"
+timeRE['m'] = r"(?P<m>0?[1-9]|1[0-2])"
+timeRE['Y'] = r"(?P<Y>\d{4})"
+timeRE['H'] = r"(?P<H>[0-1]?\d|2[0-3])"
+timeRE['M'] = r"(?P<M>[0-5]?\d)"
+timeRE['S'] = r"(?P<S>[0-5]?\d|6[0-1])"
+
# Extend build-in TimeRE with some exact patterns
# exact two-digit patterns:
-timeRE['Exd'] = r"(?P<d>3[0-1]|[1-2]\d|0[1-9])"
-timeRE['Exm'] = r"(?P<m>1[0-2]|0[1-9])"
-timeRE['ExH'] = r"(?P<H>2[0-3]|[0-1]\d)"
-timeRE['Exk'] = r" ?(?P<H>2[0-3]|[0-1]\d|\d)"
+timeRE['Exd'] = r"(?P<d>[1-2]\d|0[1-9]|3[0-1])"
+timeRE['Exm'] = r"(?P<m>0[1-9]|1[0-2])"
+timeRE['ExH'] = r"(?P<H>[0-1]\d|2[0-3])"
+timeRE['Exk'] = r" ?(?P<H>[0-1]?\d|2[0-3])"
timeRE['Exl'] = r" ?(?P<I>1[0-2]|\d)"
timeRE['ExM'] = r"(?P<M>[0-5]\d)"
-timeRE['ExS'] = r"(?P<S>6[0-1]|[0-5]\d)"
-# more precise year patterns, within same century of last year and
-# the next 3 years (for possible long uptime of fail2ban); thereby
-# respect possible run in the test-cases (alternate date used there):
-timeRE['ExY'] = r"(?P<Y>%s\d)" % _getYearCentRE(cent=(0,3), distance=3)
-timeRE['Exy'] = r"(?P<y>%s\d)" % _getYearCentRE(cent=(2,3), distance=3)
+timeRE['ExS'] = r"(?P<S>[0-5]\d|6[0-1])"
+
+def _updateTimeRE():
+ def _getYearCentRE(cent=(0,3), distance=3, now=(MyTime.now(), MyTime.alternateNow)):
+ """ Build century regex for last year and the next years (distance).
+
+ Thereby respect possible run in the test-cases (alternate date used there)
+ """
+ cent = lambda year, f=cent[0], t=cent[1]: str(year)[f:t]
+ def grp(exprset):
+ c = None
+ if len(exprset) > 1:
+ for i in exprset:
+ if c is None or i[0:-1] == c:
+ c = i[0:-1]
+ else:
+ c = None
+ break
+ if not c:
+ for i in exprset:
+ if c is None or i[0] == c:
+ c = i[0]
+ else:
+ c = None
+ break
+ if c:
+ return "%s%s" % (c, grp([i[len(c):] for i in exprset]))
+ return ("(?:%s)" % "|".join(exprset) if len(exprset[0]) > 1 else "[%s]" % "".join(exprset)) \
+ if len(exprset) > 1 else "".join(exprset)
+ exprset = set( cent(now[0].year + i) for i in (-1, distance) )
+ if len(now) > 1 and now[1]:
+ exprset |= set( cent(now[1].year + i) for i in xrange(-1, now[0].year-now[1].year+1, distance) )
+ return grp(sorted(list(exprset)))
+
+ # more precise year patterns, within same century of last year and
+ # the next 3 years (for possible long uptime of fail2ban); thereby
+ # consider possible run in the test-cases (alternate date used there),
+ # so accept years: 20xx (from test-date or 2001 up to current century)
+ timeRE['ExY'] = r"(?P<Y>%s\d)" % _getYearCentRE(cent=(0,3), distance=3,
+ now=(datetime.datetime.now(), datetime.datetime.fromtimestamp(
+ min(MyTime.alternateNowTime or 978393600, 978393600))
+ )
+ )
+ timeRE['Exy'] = r"(?P<y>\d{2})"
+
+_updateTimeRE()
def getTimePatternRE():
keys = timeRE.keys()
@@ -168,9 +205,9 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
"""
now = \
- year = month = day = hour = minute = tzoffset = \
+ year = month = day = tzoffset = \
weekday = julian = week_of_year = None
- second = fraction = 0
+ hour = minute = second = fraction = 0
for key, val in found_dict.iteritems():
if val is None: continue
# Directives not explicitly handled below:
@@ -234,16 +271,12 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
week_of_year = int(val)
# U starts week on Sunday, W - on Monday
week_of_year_start = 6 if key == 'U' else 0
- elif key == 'z':
+ elif key in ('z', 'Z'):
z = val
if z in ("Z", "UTC", "GMT"):
tzoffset = 0
else:
tzoffset = zone2offset(z, 0); # currently offset-based only
- elif key == 'Z':
- z = val
- if z in ("UTC", "GMT"):
- tzoffset = 0
# Fail2Ban will assume it's this year
assume_year = False
@@ -291,9 +324,8 @@ def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
date_result -= datetime.timedelta(days=1)
if assume_year:
if not now: now = MyTime.now()
- if date_result > now:
- # Could be last year?
- # also reset month and day as it's not yesterday...
+ if date_result > now + datetime.timedelta(days=1): # ignore by timezone issues (+24h)
+ # assume last year - also reset month and day as it's not yesterday...
date_result = date_result.replace(
year=year-1, month=month, day=day)
diff --git a/fail2ban/server/ticket.py b/fail2ban/server/ticket.py
index 5debc79d..96e67773 100644
--- a/fail2ban/server/ticket.py
+++ b/fail2ban/server/ticket.py
@@ -33,7 +33,7 @@ logSys = getLogger(__name__)
class Ticket(object):
- __slots__ = ('_ip', '_flags', '_banCount', '_banTime', '_time', '_data', '_retry', '_lastReset')
+ __slots__ = ('_id', '_flags', '_banCount', '_banTime', '_time', '_data', '_retry', '_lastReset')
MAX_TIME = 0X7FFFFFFFFFFF ;# 4461763-th year
@@ -48,7 +48,7 @@ class Ticket(object):
@param matches (log) lines caused the ticket
"""
- self.setIP(ip)
+ self.setID(ip)
self._flags = 0;
self._banCount = 0;
self._banTime = None;
@@ -65,7 +65,7 @@ class Ticket(object):
def __str__(self):
return "%s: ip=%s time=%s bantime=%s bancount=%s #attempts=%d matches=%r" % \
- (self.__class__.__name__.split('.')[-1], self._ip, self._time,
+ (self.__class__.__name__.split('.')[-1], self._id, self._time,
self._banTime, self._banCount,
self._data['failures'], self._data.get('matches', []))
@@ -74,7 +74,7 @@ class Ticket(object):
def __eq__(self, other):
try:
- return self._ip == other._ip and \
+ return self._id == other._id and \
round(self._time, 2) == round(other._time, 2) and \
self._data == other._data
except AttributeError:
@@ -86,18 +86,17 @@ class Ticket(object):
if v is not None:
setattr(self, n, v)
-
- def setIP(self, value):
+ def setID(self, value):
# guarantee using IPAddr instead of unicode, str for the IP
if isinstance(value, basestring):
value = IPAddr(value)
- self._ip = value
+ self._id = value
def getID(self):
- return self._data.get('fid', self._ip)
+ return self._id
def getIP(self):
- return self._ip
+ return self._data.get('ip', self._id)
def setTime(self, value):
self._time = value
@@ -144,7 +143,13 @@ class Ticket(object):
return self._data['failures']
def setMatches(self, matches):
- self._data['matches'] = matches or []
+ if matches:
+ self._data['matches'] = matches
+ else:
+ try:
+ del self._data['matches']
+ except KeyError:
+ pass
def getMatches(self):
return [(line if not isinstance(line, (list, tuple)) else "".join(line)) \
@@ -209,20 +214,26 @@ class Ticket(object):
# return single value of data:
return self._data.get(key, default)
+ @property
+ def banEpoch(self):
+ return getattr(self, '_banEpoch', 0)
+ @banEpoch.setter
+ def banEpoch(self, value):
+ self._banEpoch = value
+
class FailTicket(Ticket):
def __init__(self, ip=None, time=None, matches=None, data={}, ticket=None):
# this class variables:
- self._retry = 0
- self._lastReset = None
+ self._firstTime = None
+ self._retry = 1
# create/copy using default ticket constructor:
Ticket.__init__(self, ip, time, matches, data, ticket)
# init:
- if ticket is None:
- self._lastReset = time if time is not None else self.getTime()
- if not self._retry:
- self._retry = self._data['failures'];
+ if not isinstance(ticket, FailTicket):
+ self._firstTime = time if time is not None else self.getTime()
+ self._retry = self._data.get('failures', 1)
def setRetry(self, value):
""" Set artificial retry count, normally equal failures / attempt,
@@ -239,7 +250,20 @@ class FailTicket(Ticket):
""" Returns failures / attempt count or
artificial retry count increased for bad IPs
"""
- return max(self._retry, self._data['failures'])
+ return self._retry
+
+ def adjustTime(self, time, maxTime):
+ """ Adjust time of ticket and current attempts count considering given maxTime
+ as estimation from rate by previous known interval (if it exceeds the findTime)
+ """
+ if time > self._time:
+ # expand current interval and attempts count (considering maxTime):
+ if self._firstTime < time - maxTime:
+ # adjust retry calculated as estimation from rate by previous known interval:
+ self._retry = int(round(self._retry / float(time - self._firstTime) * maxTime))
+ self._firstTime = time - maxTime
+ # last time of failure:
+ self._time = time
def inc(self, matches=None, attempt=1, count=1):
self._retry += count
@@ -251,19 +275,6 @@ class FailTicket(Ticket):
else:
self._data['matches'] = matches
- def setLastTime(self, value):
- if value > self._time:
- self._time = value
-
- def getLastTime(self):
- return self._time
-
- def getLastReset(self):
- return self._lastReset
-
- def setLastReset(self, value):
- self._lastReset = value
-
@staticmethod
def wrap(o):
o.__class__ = FailTicket
diff --git a/fail2ban/server/transmitter.py b/fail2ban/server/transmitter.py
index c24408c4..6de60f94 100644
--- a/fail2ban/server/transmitter.py
+++ b/fail2ban/server/transmitter.py
@@ -43,6 +43,7 @@ class Transmitter:
def __init__(self, server):
self.__server = server
+ self.__quiet = 0
##
# Proceeds a command.
@@ -57,7 +58,7 @@ class Transmitter:
ret = self.__commandHandler(command)
ack = 0, ret
except Exception as e:
- logSys.warning("Command %r has failed. Received %r",
+ logSys.error("Command %r has failed. Received %r",
command, e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
ack = 1, e
@@ -69,9 +70,10 @@ class Transmitter:
#
def __commandHandler(self, command):
- if command[0] == "ping":
+ name = command[0]
+ if name == "ping":
return "pong"
- elif command[0] == "add":
+ elif name == "add":
name = command[1]
if name == "--all":
raise Exception("Reserved name %r" % (name,))
@@ -81,11 +83,15 @@ class Transmitter:
backend = "auto"
self.__server.addJail(name, backend)
return name
- elif command[0] == "start":
+ elif name == "multi-set":
+ return self.__commandSet(command[1:], True)
+ elif name == "set":
+ return self.__commandSet(command[1:])
+ elif name == "start":
name = command[1]
self.__server.startJail(name)
return None
- elif command[0] == "stop":
+ elif name == "stop":
if len(command) == 1:
self.__server.quit()
elif command[1] == "--all":
@@ -94,47 +100,53 @@ class Transmitter:
name = command[1]
self.__server.stopJail(name)
return None
- elif command[0] == "reload":
+ elif name == "reload":
opts = command[1:3]
+ self.__quiet = 1
try:
self.__server.reloadJails(*opts, begin=True)
for cmd in command[3]:
self.__commandHandler(cmd)
finally:
+ self.__quiet = 0
self.__server.reloadJails(*opts, begin=False)
return 'OK'
- elif len(command) >= 2 and command[0] == "unban":
+ elif name == "unban" and len(command) >= 2:
# unban in all jails:
value = command[1:]
# if all ips:
if len(value) == 1 and value[0] == "--all":
return self.__server.setUnbanIP()
- cnt = 0
- for value in value:
- cnt += self.__server.setUnbanIP(None, value)
- return cnt
- elif command[0] == "echo":
+ return self.__server.setUnbanIP(None, value)
+ elif name == "banned":
+ # check IP is banned in all jails:
+ return self.__server.banned(None, command[1:])
+ elif name == "echo":
return command[1:]
- elif command[0] == "server-status":
+ elif name == "server-status":
logSys.debug("Status: ready")
return "Server ready"
- elif command[0] == "sleep":
+ elif name == "server-stream":
+ self.__quiet = 1
+ try:
+ for cmd in command[1]:
+ self.__commandHandler(cmd)
+ finally:
+ self.__quiet = 0
+ return None
+ elif name == "sleep":
value = command[1]
time.sleep(float(value))
return None
- elif command[0] == "flushlogs":
+ elif name == "flushlogs":
return self.__server.flushLogs()
- elif command[0] == "multi-set":
- return self.__commandSet(command[1:], True)
- elif command[0] == "set":
- return self.__commandSet(command[1:])
- elif command[0] == "get":
+ elif name == "get":
return self.__commandGet(command[1:])
- elif command[0] == "status":
+ elif name == "status":
return self.status(command[1:])
- elif command[0] == "version":
+ elif name == "version":
return version.version
- elif command[0] == "config-error":
+ elif name == "config-error":
logSys.error(command[1])
return None
raise Exception("Invalid command")
@@ -145,19 +157,31 @@ class Transmitter:
if name == "loglevel":
value = command[1]
self.__server.setLogLevel(value)
+ if self.__quiet: return
return self.__server.getLogLevel()
elif name == "logtarget":
value = command[1]
if self.__server.setLogTarget(value):
+ if self.__quiet: return
return self.__server.getLogTarget()
else:
raise Exception("Failed to change log target")
elif name == "syslogsocket":
value = command[1]
if self.__server.setSyslogSocket(value):
+ if self.__quiet: return
return self.__server.getSyslogSocket()
else:
raise Exception("Failed to change syslog socket")
+ elif name == "allowipv6":
+ value = command[1]
+ self.__server.setIPv6IsAllowed(value)
+ if self.__quiet: return
+ return value
+ #Thread
+ elif name == "thread":
+ value = command[1]
+ return self.__server.setThreadOptions(value)
#Database
elif name == "dbfile":
self.__server.setDatabase(command[1])
@@ -165,14 +189,25 @@ class Transmitter:
if db is None:
return None
else:
+ if self.__quiet: return
return db.filename
+ elif name == "dbmaxmatches":
+ db = self.__server.getDatabase()
+ if db is None:
+ logSys.log(logging.MSG, "dbmaxmatches setting was not in effect since no db yet")
+ return None
+ else:
+ db.maxMatches = int(command[1])
+ if self.__quiet: return
+ return db.maxMatches
elif name == "dbpurgeage":
db = self.__server.getDatabase()
if db is None:
- logSys.warning("dbpurgeage setting was not in effect since no db yet")
+ logSys.log(logging.MSG, "dbpurgeage setting was not in effect since no db yet")
return None
else:
db.purgeage = command[1]
+ if self.__quiet: return
return db.purgeage
# Jail
elif command[1] == "idle":
@@ -182,27 +217,33 @@ class Transmitter:
self.__server.setIdleJail(name, False)
else:
raise Exception("Invalid idle option, must be 'on' or 'off'")
+ if self.__quiet: return
return self.__server.getIdleJail(name)
# Filter
elif command[1] == "ignoreself":
value = command[2]
self.__server.setIgnoreSelf(name, value)
+ if self.__quiet: return
return self.__server.getIgnoreSelf(name)
elif command[1] == "addignoreip":
- value = command[2]
- self.__server.addIgnoreIP(name, value)
+ for value in command[2:]:
+ self.__server.addIgnoreIP(name, value)
+ if self.__quiet: return
return self.__server.getIgnoreIP(name)
elif command[1] == "delignoreip":
value = command[2]
self.__server.delIgnoreIP(name, value)
+ if self.__quiet: return
return self.__server.getIgnoreIP(name)
elif command[1] == "ignorecommand":
value = command[2]
self.__server.setIgnoreCommand(name, value)
+ if self.__quiet: return
return self.__server.getIgnoreCommand(name)
elif command[1] == "ignorecache":
value = command[2]
self.__server.setIgnoreCache(name, value)
+ if self.__quiet: return
return self.__server.getIgnoreCache(name)
elif command[1] == "addlogpath":
value = command[2]
@@ -215,93 +256,126 @@ class Transmitter:
elif len(command) > 4:
raise ValueError("Only one file can be added at a time")
self.__server.addLogPath(name, value, tail)
+ if self.__quiet: return
return self.__server.getLogPath(name)
elif command[1] == "dellogpath":
value = command[2]
self.__server.delLogPath(name, value)
+ if self.__quiet: return
return self.__server.getLogPath(name)
elif command[1] == "logencoding":
value = command[2]
self.__server.setLogEncoding(name, value)
+ if self.__quiet: return
return self.__server.getLogEncoding(name)
elif command[1] == "addjournalmatch": # pragma: systemd no cover
value = command[2:]
self.__server.addJournalMatch(name, value)
+ if self.__quiet: return
return self.__server.getJournalMatch(name)
elif command[1] == "deljournalmatch": # pragma: systemd no cover
value = command[2:]
self.__server.delJournalMatch(name, value)
+ if self.__quiet: return
return self.__server.getJournalMatch(name)
elif command[1] == "prefregex":
value = command[2]
self.__server.setPrefRegex(name, value)
- return self.__server.getPrefRegex(name)
+ if self.__quiet: return
+ v = self.__server.getPrefRegex(name)
+ return v.getRegex() if v else ""
elif command[1] == "addfailregex":
value = command[2]
self.__server.addFailRegex(name, value, multiple=multiple)
if multiple:
return True
+ if self.__quiet: return
return self.__server.getFailRegex(name)
elif command[1] == "delfailregex":
value = int(command[2])
self.__server.delFailRegex(name, value)
+ if self.__quiet: return
return self.__server.getFailRegex(name)
elif command[1] == "addignoreregex":
value = command[2]
self.__server.addIgnoreRegex(name, value, multiple=multiple)
if multiple:
return True
+ if self.__quiet: return
return self.__server.getIgnoreRegex(name)
elif command[1] == "delignoreregex":
value = int(command[2])
self.__server.delIgnoreRegex(name, value)
+ if self.__quiet: return
return self.__server.getIgnoreRegex(name)
elif command[1] == "usedns":
value = command[2]
self.__server.setUseDns(name, value)
+ if self.__quiet: return
return self.__server.getUseDns(name)
elif command[1] == "findtime":
value = command[2]
self.__server.setFindTime(name, value)
+ if self.__quiet: return
return self.__server.getFindTime(name)
elif command[1] == "datepattern":
value = command[2]
self.__server.setDatePattern(name, value)
+ if self.__quiet: return
return self.__server.getDatePattern(name)
elif command[1] == "logtimezone":
value = command[2]
self.__server.setLogTimeZone(name, value)
+ if self.__quiet: return
return self.__server.getLogTimeZone(name)
+ elif command[1] == "maxmatches":
+ value = command[2]
+ self.__server.setMaxMatches(name, int(value))
+ if self.__quiet: return
+ return self.__server.getMaxMatches(name)
elif command[1] == "maxretry":
value = command[2]
self.__server.setMaxRetry(name, int(value))
+ if self.__quiet: return
return self.__server.getMaxRetry(name)
elif command[1] == "maxlines":
value = command[2]
self.__server.setMaxLines(name, int(value))
+ if self.__quiet: return
return self.__server.getMaxLines(name)
# command
elif command[1] == "bantime":
value = command[2]
self.__server.setBanTime(name, value)
+ if self.__quiet: return
return self.__server.getBanTime(name)
+ elif command[1] == "attempt":
+ value = command[2:]
+ if self.__quiet: return
+ return self.__server.addAttemptIP(name, *value)
elif command[1].startswith("bantime."):
value = command[2]
opt = command[1][len("bantime."):]
self.__server.setBanTimeExtra(name, opt, value)
+ if self.__quiet: return
return self.__server.getBanTimeExtra(name, opt)
elif command[1] == "banip":
- value = command[2]
+ value = command[2:]
return self.__server.setBanIP(name,value)
elif command[1] == "unbanip":
- value = command[2]
- self.__server.setUnbanIP(name, value)
- return value
+ ifexists = True
+ if command[2] != "--report-absent":
+ value = command[2:]
+ else:
+ ifexists = False
+ value = command[3:]
+ return self.__server.setUnbanIP(name, value, ifexists=ifexists)
elif command[1] == "addaction":
args = [command[2]]
if len(command) > 3:
args.extend([command[3], json.loads(command[4])])
self.__server.addAction(name, *args)
+ if self.__quiet: return
return args[0]
elif command[1] == "delaction":
value = command[2]
@@ -325,10 +399,12 @@ class Transmitter:
actionkey = command[3]
if callable(getattr(action, actionkey, None)):
actionvalue = json.loads(command[4]) if len(command)>4 else {}
+ if self.__quiet: return
return getattr(action, actionkey)(**actionvalue)
else:
actionvalue = command[4]
setattr(action, actionkey, actionvalue)
+ if self.__quiet: return
return getattr(action, actionkey)
raise Exception("Invalid command %r (no set action or not yet implemented)" % (command[1],))
@@ -341,6 +417,9 @@ class Transmitter:
return self.__server.getLogTarget()
elif name == "syslogsocket":
return self.__server.getSyslogSocket()
+ #Thread
+ elif name == "thread":
+ return self.__server.getThreadOptions()
#Database
elif name == "dbfile":
db = self.__server.getDatabase()
@@ -348,13 +427,22 @@ class Transmitter:
return None
else:
return db.filename
+ elif name == "dbmaxmatches":
+ db = self.__server.getDatabase()
+ if db is None:
+ return None
+ else:
+ return db.maxMatches
elif name == "dbpurgeage":
db = self.__server.getDatabase()
if db is None:
return None
else:
return db.purgeage
- # Filter
+ # Jail, Filter
+ elif command[1] == "banned":
+ # check IP is banned in all jails:
+ return self.__server.banned(name, command[2:])
elif command[1] == "logpath":
return self.__server.getLogPath(name)
elif command[1] == "logencoding":
@@ -370,7 +458,8 @@ class Transmitter:
elif command[1] == "ignorecache":
return self.__server.getIgnoreCache(name)
elif command[1] == "prefregex":
- return self.__server.getPrefRegex(name)
+ v = self.__server.getPrefRegex(name)
+ return v.getRegex() if v else ""
elif command[1] == "failregex":
return self.__server.getFailRegex(name)
elif command[1] == "ignoreregex":
@@ -383,6 +472,8 @@ class Transmitter:
return self.__server.getDatePattern(name)
elif command[1] == "logtimezone":
return self.__server.getLogTimeZone(name)
+ elif command[1] == "maxmatches":
+ return self.__server.getMaxMatches(name)
elif command[1] == "maxretry":
return self.__server.getMaxRetry(name)
elif command[1] == "maxlines":
@@ -390,6 +481,9 @@ class Transmitter:
# Action
elif command[1] == "bantime":
return self.__server.getBanTime(name)
+ elif command[1] == "banip":
+ return self.__server.getBanList(name,
+ withTime=len(command) > 2 and command[2] == "--with-time")
elif command[1].startswith("bantime."):
opt = command[1][len("bantime."):]
return self.__server.getBanTimeExtra(name, opt)
diff --git a/fail2ban/server/utils.py b/fail2ban/server/utils.py
index b59fb4e1..18073ea7 100644
--- a/fail2ban/server/utils.py
+++ b/fail2ban/server/utils.py
@@ -27,8 +27,10 @@ import os
import signal
import subprocess
import sys
+from threading import Lock
import time
from ..helpers import getLogger, _merge_dicts, uni_decode
+from collections import OrderedDict
if sys.version_info >= (3, 3):
import importlib.machinery
@@ -69,7 +71,8 @@ class Utils():
def __init__(self, *args, **kwargs):
self.setOptions(*args, **kwargs)
- self._cache = {}
+ self._cache = OrderedDict()
+ self.__lock = Lock()
def setOptions(self, maxCount=1000, maxTime=60):
self.maxCount = maxCount
@@ -83,27 +86,32 @@ class Utils():
if v:
if v[1] > time.time():
return v[0]
- del self._cache[k]
+ self.unset(k)
return defv
def set(self, k, v):
t = time.time()
- cache = self._cache # for shorter local access
- # clean cache if max count reached:
- if len(cache) >= self.maxCount:
- for (ck, cv) in cache.items():
- if cv[1] < t:
- del cache[ck]
- # if still max count - remove any one:
+ # avoid multiple modification of dict multi-threaded:
+ cache = self._cache
+ with self.__lock:
+ # clean cache if max count reached:
if len(cache) >= self.maxCount:
- cache.popitem()
- cache[k] = (v, t + self.maxTime)
+ # ordered (so remove some from ahead, FIFO)
+ while cache:
+ (ck, cv) = cache.popitem(last=False)
+ # if not yet expired (but has free slot for new entry):
+ if cv[1] > t and len(cache) < self.maxCount:
+ break
+ # set now:
+ cache[k] = (v, t + self.maxTime)
def unset(self, k):
- try:
- del self._cache[k]
- except KeyError:
- pass
+ with self.__lock:
+ self._cache.pop(k, None)
+
+ def clear(self):
+ with self.__lock:
+ self._cache.clear()
@staticmethod
@@ -224,8 +232,8 @@ class Utils():
return False if not output else (False, stdout, stderr, retcode)
std_level = logging.DEBUG if retcode in success_codes else logging.ERROR
- if std_level > logSys.getEffectiveLevel():
- if logCmd: logCmd(std_level-1); logCmd = None
+ if std_level >= logSys.getEffectiveLevel():
+ if logCmd: logCmd(std_level-1 if std_level == logging.DEBUG else logging.ERROR); logCmd = None
# if we need output (to return or to log it):
if output or std_level >= logSys.getEffectiveLevel():
@@ -240,7 +248,6 @@ class Utils():
if stdout is not None and stdout != '' and std_level >= logSys.getEffectiveLevel():
for l in stdout.splitlines():
logSys.log(std_level, "%x -- stdout: %r", realCmdId, uni_decode(l))
- popen.stdout.close()
if popen.stderr:
try:
if retcode is None or retcode < 0:
@@ -251,7 +258,9 @@ class Utils():
if stderr is not None and stderr != '' and std_level >= logSys.getEffectiveLevel():
for l in stderr.splitlines():
logSys.log(std_level, "%x -- stderr: %r", realCmdId, uni_decode(l))
- popen.stderr.close()
+
+ if popen.stdout: popen.stdout.close()
+ if popen.stderr: popen.stderr.close()
success = False
if retcode in success_codes:
@@ -307,11 +316,9 @@ class Utils():
timeout_expr = lambda: time.time() > time0
else:
timeout_expr = timeout
- if not interval:
- interval = Utils.DEFAULT_SLEEP_INTERVAL
if timeout_expr():
break
- stm = min(stm + interval, Utils.DEFAULT_SLEEP_TIME)
+ stm = min(stm + (interval or Utils.DEFAULT_SLEEP_INTERVAL), Utils.DEFAULT_SLEEP_TIME)
time.sleep(stm)
return ret