summaryrefslogtreecommitdiff
path: root/lib/ansible/modules/extras/monitoring
diff options
context:
space:
mode:
Diffstat (limited to 'lib/ansible/modules/extras/monitoring')
-rw-r--r--lib/ansible/modules/extras/monitoring/__init__.py0
-rw-r--r--lib/ansible/modules/extras/monitoring/airbrake_deployment.py131
-rw-r--r--lib/ansible/modules/extras/monitoring/bigpanda.py180
-rw-r--r--lib/ansible/modules/extras/monitoring/boundary_meter.py264
-rw-r--r--lib/ansible/modules/extras/monitoring/circonus_annotation.py147
-rw-r--r--lib/ansible/modules/extras/monitoring/datadog_event.py165
-rw-r--r--lib/ansible/modules/extras/monitoring/datadog_monitor.py310
-rw-r--r--lib/ansible/modules/extras/monitoring/honeybadger_deployment.py141
-rw-r--r--lib/ansible/modules/extras/monitoring/librato_annotation.py161
-rw-r--r--lib/ansible/modules/extras/monitoring/logentries.py147
-rw-r--r--lib/ansible/modules/extras/monitoring/logicmonitor.py2169
-rw-r--r--lib/ansible/modules/extras/monitoring/logicmonitor_facts.py632
-rw-r--r--lib/ansible/modules/extras/monitoring/monit.py185
-rw-r--r--lib/ansible/modules/extras/monitoring/nagios.py1030
-rw-r--r--lib/ansible/modules/extras/monitoring/newrelic_deployment.py147
-rw-r--r--lib/ansible/modules/extras/monitoring/pagerduty.py299
-rw-r--r--lib/ansible/modules/extras/monitoring/pagerduty_alert.py213
-rw-r--r--lib/ansible/modules/extras/monitoring/pingdom.py152
-rw-r--r--lib/ansible/modules/extras/monitoring/rollbar_deployment.py134
-rw-r--r--lib/ansible/modules/extras/monitoring/sensu_check.py384
-rw-r--r--lib/ansible/modules/extras/monitoring/sensu_subscription.py161
-rw-r--r--lib/ansible/modules/extras/monitoring/stackdriver.py216
-rw-r--r--lib/ansible/modules/extras/monitoring/statusio_maintenance.py480
-rw-r--r--lib/ansible/modules/extras/monitoring/uptimerobot.py168
-rw-r--r--lib/ansible/modules/extras/monitoring/zabbix_group.py225
-rw-r--r--lib/ansible/modules/extras/monitoring/zabbix_host.py562
-rw-r--r--lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py243
-rw-r--r--lib/ansible/modules/extras/monitoring/zabbix_maintenance.py377
-rw-r--r--lib/ansible/modules/extras/monitoring/zabbix_screen.py435
29 files changed, 9858 insertions, 0 deletions
diff --git a/lib/ansible/modules/extras/monitoring/__init__.py b/lib/ansible/modules/extras/monitoring/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/__init__.py
diff --git a/lib/ansible/modules/extras/monitoring/airbrake_deployment.py b/lib/ansible/modules/extras/monitoring/airbrake_deployment.py
new file mode 100644
index 0000000000..262c3d2b44
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/airbrake_deployment.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+version_added: "1.2"
+author: "Bruce Pennypacker (@bpennypacker)"
+short_description: Notify airbrake about app deployments
+description:
+ - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ required: false
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+    default: "https://api.airbrake.io/deploys.txt"
+ version_added: "1.5"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- airbrake_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+ revision=4.2
+'''
+
+import urllib
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Notify an Airbrake-compatible endpoint about an app deployment.

    Builds the deploy-tracking form parameters from the module arguments and
    POSTs them to the configured URL; exits changed on HTTP 200.
    """

    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Build the deploy-tracking parameters; optional fields are only sent
    # when the caller provided a non-empty value.
    params = {}

    if module.params["environment"]:
        params["deploy[rails_env]"] = module.params["environment"]

    if module.params["user"]:
        params["deploy[local_username]"] = module.params["user"]

    if module.params["repo"]:
        params["deploy[scm_repository]"] = module.params["repo"]

    if module.params["revision"]:
        params["deploy[scm_revision]"] = module.params["revision"]

    params["api_key"] = module.params["token"]

    url = module.params.get('url')

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    # Send the data to airbrake
    data = urllib.urlencode(params)
    response, info = fetch_url(module, url, data=data)
    if info['status'] == 200:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

# Guard the entry point so importing the module has no side effects
# (consistent with the other modules in this tree).
if __name__ == '__main__':
    main()
+
diff --git a/lib/ansible/modules/extras/monitoring/bigpanda.py b/lib/ansible/modules/extras/monitoring/bigpanda.py
new file mode 100644
index 0000000000..df8e55fd74
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/bigpanda.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+version_added: "1.8"
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+options:
+ component:
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+    aliases: ['name']
+ version:
+ description:
+ - The deployment version.
+ required: true
+ token:
+ description:
+ - API token.
+ required: true
+ state:
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ description:
+ - Name of affected host name. Can be a list.
+ required: false
+ default: machine's hostname
+    aliases: ['host']
+ env:
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ description:
+ - Base URL of the API server.
+ required: False
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
+...
+- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished
+
+If outside servers aren't reachable from your machine, use local_action and override hosts:
+- local_action: bigpanda component=myapp version=1.3 token={{ bigpanda_token }} hosts={{ansible_hostname}} state=started
+ register: deployment
+...
+- local_action: bigpanda component={{ deployment.component }} version={{ deployment.version }} token={{ deployment.token }} state=finished
+'''
+
+# ===========================================
+# Module execution.
+#
+import socket
+
def main():
    """Send a deployment start/end event to the BigPanda API.

    Returns (via exit_json) a deployment dict carrying all the parameters
    needed by a later 'finished'/'failed' call.
    """

    module = AnsibleModule(
        argument_spec=dict(
            component=dict(required=True, aliases=['name']),
            version=dict(required=True),
            token=dict(required=True, no_log=True),
            state=dict(required=True, choices=['started', 'finished', 'failed']),
            hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
            env=dict(required=False),
            owner=dict(required=False),
            description=dict(required=False),
            message=dict(required=False),
            source_system=dict(required=False, default='ansible'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True,
        check_invalid_arguments=False,
    )

    token = module.params['token']
    state = module.params['state']
    url = module.params['url']

    # Build the common request body
    body = dict()
    for k in ('component', 'version', 'hosts'):
        v = module.params[k]
        if v is not None:
            body[k] = v

    # The API expects a list of hosts even when only one was given.
    if not isinstance(body['hosts'], list):
        body['hosts'] = [body['hosts']]

    # Insert state-specific attributes to body
    if state == 'started':
        for k in ('source_system', 'env', 'owner', 'description'):
            v = module.params[k]
            if v is not None:
                body[k] = v

        request_url = url + '/data/events/deployments/start'
    else:
        message = module.params['message']
        if message is not None:
            body['errorMessage'] = message

        if state == 'finished':
            body['status'] = 'success'
        else:
            body['status'] = 'failure'

        request_url = url + '/data/events/deployments/end'

    # Build the deployment object we return for use in later module calls.
    deployment = dict(token=token, url=url)
    deployment.update(body)
    if 'errorMessage' in deployment:
        message = deployment.pop('errorMessage')
        deployment['message'] = message

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True, **deployment)

    # Send the data to bigpanda
    data = json.dumps(body)
    headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
    try:
        response, info = fetch_url(module, request_url, data=data, headers=headers)
        if info['status'] == 200:
            module.exit_json(changed=True, **deployment)
        else:
            module.fail_json(msg=json.dumps(info))
    except Exception as e:
        # "except ... as ..." works on Python 2.6+ and Python 3; the old
        # comma form is a SyntaxError on Python 3.
        module.fail_json(msg=str(e))

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/extras/monitoring/boundary_meter.py b/lib/ansible/modules/extras/monitoring/boundary_meter.py
new file mode 100644
index 0000000000..3729b606a1
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/boundary_meter.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to add boundary meters.
+
+(c) 2013, curtis <curtis@serverascode.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import datetime
+import base64
+import os
+
+DOCUMENTATION = '''
+
+module: boundary_meter
+short_description: Manage boundary meters
+description:
+ - This module manages boundary meters
+version_added: "1.3"
+author: "curtis (@ccollicutt)"
+requirements:
+ - Boundary API access
+ - bprobe is required to send data, but not to register a meter
+options:
+ name:
+ description:
+ - meter name
+ required: true
+ state:
+ description:
+ - Whether to create or remove the client from boundary
+ required: false
+ default: true
+ choices: ["present", "absent"]
+ apiid:
+ description:
+ - Organizations boundary API ID
+ required: true
+ apikey:
+ description:
+ - Organizations boundary API KEY
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
+notes:
+ - This module does not yet support boundary tags.
+
+'''
+
+EXAMPLES='''
+- name: Create meter
+  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=present name="{{ inventory_hostname }}"
+
+- name: Delete meter
+  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=absent name="{{ inventory_hostname }}"
+
+'''
+
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"


def auth_encode(apikey):
    """Base64-encode the API key for an HTTP Basic Authorization header.

    The previous implementation discarded the result of str.replace()
    (strings are immutable), so the stripped value was never used; re-bind
    the result. Encoding/decoding explicitly also keeps this working on
    both Python 2 and Python 3, where b64encode deals in bytes.
    """
    auth = base64.standard_b64encode(apikey.encode('utf-8')).decode('ascii')
    return auth.replace("\n", "")
+
def build_url(name, apiid, action, meter_id=None, cert_type=None):
    """Return the Boundary API URL for the requested action.

    Returns None for an unrecognized action (same as the original
    fall-through behaviour).
    """
    base = 'https://%s/%s/meters' % (api_host, apiid)
    if action == "create":
        return base
    if action == "search":
        return "%s?name=%s" % (base, name)
    if action == "certificates":
        return "%s/%s/%s.pem" % (base, meter_id, cert_type)
    if action == "tags":
        return "%s/%s/tags" % (base, meter_id)
    if action == "delete":
        return "%s/%s" % (base, meter_id)
+
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
    """Issue an authenticated JSON request to the Boundary API.

    Returns the (response, info) pair from fetch_url().
    """
    # build_url's optional parameters default to None, so forwarding them
    # by keyword is equivalent to the original's staged positional calls.
    url = build_url(name, apiid, action, meter_id=meter_id, cert_type=cert_type)

    headers = {
        "Authorization": "Basic %s" % auth_encode(apikey),
        "Content-Type": "application/json",
    }

    return fetch_url(module, url, data=data, headers=headers)
+
def create_meter(module, name, apiid, apikey):
    """Create the meter `name` unless it exists, then fetch its certificates.

    Exits the module directly (changed=False) when the meter already exists;
    otherwise returns (0, status message) or fails the module on error.
    """
    meters = search_meter(module, name, apiid, apikey)

    if len(meters) > 0:
        # If the meter already exists, do nothing
        module.exit_json(status="Meter " + name + " already exists", changed=False)
    else:
        # If it doesn't exist, create it
        body = '{"name":"' + name + '"}'
        response, info = http_request(module, name, apiid, apikey, data=body, action="create")

        if info['status'] != 200:
            module.fail_json(msg="Failed to connect to api host to create meter")

        # If the config directory doesn't exist, create it
        if not os.path.exists(config_directory):
            try:
                os.makedirs(config_directory)
            except OSError:
                # fail_json() accepts keyword arguments only (msg=...);
                # a positional message would raise a TypeError itself.
                module.fail_json(msg="Could not create " + config_directory)

        # Download both cert files from the api host
        for cert_type in ('key', 'cert'):
            try:
                # If we can open the file it is already present; close the
                # probe handle instead of leaking it.
                cert_file = open('%s/%s.pem' % (config_directory, cert_type))
                cert_file.close()
            except IOError:
                # Not there yet -- download it.
                rc = download_request(module, name, apiid, apikey, cert_type)
                if rc is False:
                    module.fail_json(msg="Download request for " + cert_type + ".pem failed")

    return 0, "Meter " + name + " created"
+
def search_meter(module, name, apiid, apikey):
    """Return the list of meters matching `name`, decoded from the API JSON."""
    response, info = http_request(module, name, apiid, apikey, action="search")

    if info['status'] != 200:
        # fail_json() accepts keyword arguments only; the message must be
        # passed as msg=..., not positionally.
        module.fail_json(msg="Failed to connect to api host to search for meter")

    # Return meters
    return json.loads(response.read())
+
def get_meter_id(module, name, apiid, apikey):
    """Return the id of the meter named `name`, or None when it does not exist."""
    matches = search_meter(module, name, apiid, apikey)
    return matches[0]['id'] if matches else None
+
def delete_meter(module, name, apiid, apikey):
    """Delete the meter named `name` and its local certificate files.

    Returns (1, message) when the meter does not exist, (0, message) on
    success; fails the module on API or filesystem errors.
    """
    meter_id = get_meter_id(module, name, apiid, apikey)

    if meter_id is None:
        return 1, "Meter does not exist, so can't delete it"

    # `action` was an undefined name here (NameError), and meter_id was
    # passed positionally into http_request's `data` parameter; pass both
    # by keyword instead.
    response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
    if info['status'] != 200:
        module.fail_json(msg="Failed to delete meter")

    # Each new meter gets a new key.pem and ca.pem file, so they should be deleted
    for cert_type in ('cert', 'key'):
        cert_file = '%s/%s.pem' % (config_directory, cert_type)
        try:
            os.remove(cert_file)
        except OSError:
            module.fail_json(msg="Failed to remove " + cert_type + ".pem file")

    return 0, "Meter " + name + " deleted"
+
def download_request(module, name, apiid, apikey, cert_type):
    """Download the meter's `cert_type` ('key' or 'cert') PEM file.

    Writes the certificate into config_directory with 0600 permissions.
    Returns True on success; fails the module on any error.
    """
    meter_id = get_meter_id(module, name, apiid, apikey)

    if meter_id is None:
        module.fail_json(msg="Could not get meter id")

    # meter_id and cert_type must be passed by keyword: positionally they
    # would land in http_request's `data` and `meter_id` parameters.
    response, info = http_request(module, name, apiid, apikey, action="certificates",
                                  meter_id=meter_id, cert_type=cert_type)
    if info['status'] != 200:
        module.fail_json(msg="Failed to connect to api host to download certificate")

    # The original gated the write on an undefined name `result`; with a
    # successful response we can simply write the file.
    try:
        cert_file_path = '%s/%s.pem' % (config_directory, cert_type)
        body = response.read()
        cert_file = open(cert_file_path, 'w')
        cert_file.write(body)
        cert_file.close()
        # Certificates are secrets: restrict them to the owner.
        os.chmod(cert_file_path, int('0600', 8))
    except (IOError, OSError):
        module.fail_json(msg="Could not write to certificate file")

    return True
+
def main():
    """Create or delete a Boundary meter according to `state`."""

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=False),
            # no_log keeps the API key out of logs, consistent with the
            # token handling in the sibling modules.
            apikey=dict(required=True, no_log=True),
            apiid=dict(required=True),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    state = module.params['state']
    name = module.params['name']
    # The argument spec declares `apikey`/`apiid`; the previous code read
    # `api_key`/`api_id`, which raised a KeyError on every run.
    apikey = module.params['apikey']
    apiid = module.params['apiid']

    if state == "present":
        (rc, result) = create_meter(module, name, apiid, apikey)
    else:
        # choices restricts state to present/absent, so this is "absent".
        (rc, result) = delete_meter(module, name, apiid, apikey)

    if rc != 0:
        module.fail_json(msg=result)

    module.exit_json(status=result, changed=True)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
    main()
+
diff --git a/lib/ansible/modules/extras/monitoring/circonus_annotation.py b/lib/ansible/modules/extras/monitoring/circonus_annotation.py
new file mode 100644
index 0000000000..9c5fbbb0fd
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/circonus_annotation.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+import requests
+import time
+import json
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: create an annotation in circonus
+description:
+ - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
+author: "Nick Harring (@NickatEpic)"
+version_added: 2.0
+requirements:
+ - urllib3
+ - requests
+ - time
+options:
+ api_key:
+ description:
+ - Circonus API key
+ required: true
+ category:
+ description:
+ - Annotation Category
+ required: true
+ description:
+ description:
+ - Description of annotation
+ required: true
+ title:
+ description:
+ - Title of annotation
+ required: true
+ start:
+ description:
+ - Unix timestamp of event start, defaults to now
+ required: false
+ stop:
+ description:
+ - Unix timestamp of event end, defaults to now + duration
+ required: false
+ duration:
+ description:
+ - Duration in seconds of annotation, defaults to 0
+ required: false
+'''
+EXAMPLES = '''
+# Create a simple annotation event with a source, defaults to start and end time of now
+- circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ description: 'This is a detailed description of the config change'
+ category: 'This category groups like annotations'
+# Create an annotation with a duration of 5 minutes and a default start time of now
+- circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ description: 'This is a detailed description of the config change'
+ category: 'This category groups like annotations'
+ duration: 300
+# Create an annotation with a start_time and end_time
+- circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ description: 'This is a detailed description of the config change'
+ category: 'This category groups like annotations'
+ start_time: 1395940006
+ end_time: 1395954407
+'''
def post_annotation(annotation, api_key):
    """POST the annotation dict to the Circonus v2 API and return the response.

    Raises requests.exceptions.HTTPError on a non-2xx status.
    """
    endpoint = 'https://api.circonus.com/v2/annotation'
    payload = json.dumps(annotation)
    resp = requests.post(endpoint, headers=build_headers(api_key), data=payload)
    resp.raise_for_status()
    return resp
+
def create_annotation(module):
    """Build the annotation payload dict from the module parameters.

    Defaults: duration 0, start now, stop start-time-now plus duration.
    Returns a dict with start, stop, category, description and title keys.
    """
    annotation = {}
    # Use the idiomatic `is not None` test instead of `!= None`.
    if module.params['duration'] is not None:
        duration = module.params['duration']
    else:
        duration = 0
    if module.params['start'] is not None:
        start = module.params['start']
    else:
        start = int(time.time())
    if module.params['stop'] is not None:
        stop = module.params['stop']
    else:
        stop = int(time.time()) + duration
    annotation['start'] = int(start)
    annotation['stop'] = int(stop)
    annotation['category'] = module.params['category']
    annotation['description'] = module.params['description']
    annotation['title'] = module.params['title']
    return annotation
def build_headers(api_token):
    """Return the standard Circonus request headers carrying `api_token`."""
    return {
        'X-Circonus-App-Name': 'ansible',
        'Host': 'api.circonus.com',
        'X-Circonus-Auth-Token': api_token,
        'Accept': 'application/json',
    }
+
def main():
    """Main function: parse arguments, build the annotation and post it."""
    module = AnsibleModule(
        argument_spec=dict(
            start=dict(required=False, type='int'),
            stop=dict(required=False, type='int'),
            category=dict(required=True),
            title=dict(required=True),
            description=dict(required=True),
            duration=dict(required=False, type='int'),
            api_key=dict(required=True, no_log=True)
        )
    )
    annotation = create_annotation(module)
    try:
        resp = post_annotation(annotation, module.params['api_key'])
    except requests.exceptions.RequestException as err_str:
        # Python 3 compatible exception syntax ("as" instead of ",").
        module.fail_json(msg='Request Failed', reason=err_str)
    module.exit_json(changed=True, annotation=resp.json())

from ansible.module_utils.basic import *
# Guard the entry point so importing has no side effects (consistent with
# the other modules in this tree).
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/extras/monitoring/datadog_event.py b/lib/ansible/modules/extras/monitoring/datadog_event.py
new file mode 100644
index 0000000000..88d921bf91
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/datadog_event.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Import Datadog
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except:
+ HAS_DATADOG = False
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to DataDog service
+description:
+- "Allows to post events to DataDog (www.datadoghq.com) service."
+- "Uses http://docs.datadoghq.com/api/#events API."
+version_added: "1.3"
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
+notes: []
+requirements: []
+options:
+ api_key:
+ description: ["Your DataDog API key."]
+ required: true
+ default: null
+ app_key:
+ description: ["Your DataDog app key."]
+ required: true
+ version_added: "2.2"
+ title:
+ description: ["The event title."]
+ required: true
+ default: null
+ text:
+ description: ["The body of the event."]
+ required: true
+ default: null
+ date_happened:
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ required: false
+ default: now
+ priority:
+ description: ["The priority of the event."]
+ required: false
+ default: normal
+ choices: [normal, low]
+ tags:
+ description: ["Comma separated list of tags to apply to the event."]
+ required: false
+ default: null
+ alert_type:
+ description: ["Type of alert."]
+ required: false
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ description: ["An arbitrary string to use for aggregation."]
+ required: false
+ default: null
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+'''
+
+EXAMPLES = '''
+# Post an event with low priority
+datadog_event: title="Testing from ansible" text="Test!" priority="low"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"
+# Post an event with several tags
+datadog_event: title="Testing from ansible" text="Test!"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"
+ tags=aa,bb,#host:{{ inventory_hostname }}
+'''
+
+# Import Datadog
def main():
    """Entry point: parse arguments, initialize the Datadog client, post the event."""
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            app_key=dict(required=True, no_log=True),
            title=dict(required=True),
            text=dict(required=True),
            date_happened=dict(required=False, default=None, type='int'),
            priority=dict(required=False, default='normal',
                          choices=['normal', 'low']),
            tags=dict(required=False, default=None, type='list'),
            alert_type=dict(required=False, default='info',
                            choices=['error', 'warning', 'info', 'success']),
            aggregation_key=dict(required=False, default=None),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    # Bail out early when the datadog client library is not importable.
    if not HAS_DATADOG:
        module.fail_json(msg='datadogpy required for this module')

    initialize(api_key=module.params['api_key'],
               app_key=module.params['app_key'])

    _post_event(module)
+
+
def _post_event(module):
    """Create the Datadog event and exit the module with the API result."""
    try:
        msg = api.Event.create(title=module.params['title'],
                               text=module.params['text'],
                               tags=module.params['tags'],
                               priority=module.params['priority'],
                               alert_type=module.params['alert_type'],
                               aggregation_key=module.params['aggregation_key'],
                               # date_happened is declared and documented in
                               # the argument spec but was previously never
                               # forwarded to the API call.
                               date_happened=module.params['date_happened'],
                               source_type_name='ansible')
        if msg['status'] != 'ok':
            module.fail_json(msg=msg)

        module.exit_json(changed=True, msg=msg)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))


from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/extras/monitoring/datadog_monitor.py b/lib/ansible/modules/extras/monitoring/datadog_monitor.py
new file mode 100644
index 0000000000..208dc73305
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/datadog_monitor.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# import module snippets
+
+# Import Datadog
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except:
+ HAS_DATADOG = False
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+- "Manages monitors within Datadog"
+- "Options like described on http://docs.datadoghq.com/api/"
+version_added: "2.0"
+author: "Sebastian Kornehl (@skornehl)"
+notes: []
+requirements: [datadog]
+options:
+ api_key:
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ description: ["Your DataDog app key."]
+ required: true
+ state:
+ description: ["The designated state of the monitor."]
+ required: true
+    choices: ['present', 'absent', 'mute', 'unmute']
+ tags:
+ description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
+ required: false
+ default: None
+ version_added: 2.2
+ type:
+ description:
+ - "The type of the monitor."
+        - The 'event alert' is available starting at Ansible 2.1
+ required: false
+ default: null
+ choices: ['metric alert', 'service check', 'event alert']
+ query:
+ description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
+ required: false
+ default: null
+ name:
+ description: ["The name of the alert."]
+ required: true
+ message:
+ description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
+ required: false
+ default: null
+ silenced:
+ description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
+ required: false
+ default: ""
+ notify_no_data:
+ description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
+ required: false
+ default: False
+ no_data_timeframe:
+ description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
+ required: false
+ default: 2x timeframe for metric, 2 minutes for service
+ timeout_h:
+ description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
+ required: false
+ default: null
+ renotify_interval:
+ description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
+ required: false
+ default: null
+ escalation_message:
+ description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
+ required: false
+ default: null
+ notify_audit:
+ description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
+ required: false
+ default: False
+ thresholds:
+ description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
+ required: false
+ default: {'ok': 1, 'critical': 1, 'warning': 1}
+ locked:
+ description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
+ required: false
+ default: False
+ version_added: 2.2
+'''
+
+EXAMPLES = '''
+# Create a metric monitor
+datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+  query: '"datadog.agent.up".over("host:host1").last(2).count_by_status()'
+ message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+# Deletes a monitor
+datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+# Mutes a monitor
+datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+# Unmutes a monitor
+datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+
+
def main():
    """Entry point: declare the module arguments and dispatch on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            app_key=dict(required=True, no_log=True),
            # Bug fix: 'choices' was misspelled 'choises' here and below,
            # which silently disabled argument validation for these options.
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
            name=dict(required=True),
            query=dict(required=False),
            message=dict(required=False, default=None),
            silenced=dict(required=False, default=None, type='dict'),
            notify_no_data=dict(required=False, default=False, type='bool'),
            no_data_timeframe=dict(required=False, default=None),
            timeout_h=dict(required=False, default=None),
            renotify_interval=dict(required=False, default=None),
            escalation_message=dict(required=False, default=None),
            notify_audit=dict(required=False, default=False, type='bool'),
            thresholds=dict(required=False, type='dict', default=None),
            tags=dict(required=False, type='list', default=None),
            locked=dict(required=False, default=False, type='bool')
        )
    )

    # The datadog library is an optional third-party dependency.
    if not HAS_DATADOG:
        module.fail_json(msg='datadogpy required for this module')

    # Configure the datadog client with the account credentials.
    initialize(api_key=module.params['api_key'],
               app_key=module.params['app_key'])

    state = module.params['state']
    if state == 'present':
        install_monitor(module)
    elif state == 'absent':
        delete_monitor(module)
    elif state == 'mute':
        mute_monitor(module)
    elif state == 'unmute':
        unmute_monitor(module)
+
+def _fix_template_vars(message):
+ return message.replace('[[', '{{').replace(']]', '}}')
+
+
def _get_monitor(module):
    """Return the monitor whose name matches the 'name' option, or {} if none does."""
    wanted = module.params['name']
    matches = (m for m in api.Monitor.get_all() if m['name'] == wanted)
    return next(matches, {})
+
+
def _post_monitor(module, options):
    """Create a new monitor from the module parameters.

    Exits the module with changed=True on success; fails with the API
    error text otherwise.
    """
    try:
        kwargs = dict(type=module.params['type'], query=module.params['query'],
                      name=module.params['name'],
                      message=_fix_template_vars(module.params['message']),
                      options=options)
        if module.params['tags'] is not None:
            kwargs['tags'] = module.params['tags']
        msg = api.Monitor.create(**kwargs)
        # The API signals problems through an 'errors' key in the response.
        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception as e:  # was py2-only 'except Exception, e' syntax
        module.fail_json(msg=str(e))
+
+def _equal_dicts(a, b, ignore_keys):
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
def _update_monitor(module, monitor, options):
    """Update an existing monitor in place.

    Reports changed=False when the API response equals the existing monitor
    (ignoring bookkeeping fields), changed=True otherwise.
    """
    try:
        kwargs = dict(id=monitor['id'], query=module.params['query'],
                      name=module.params['name'],
                      message=_fix_template_vars(module.params['message']),
                      options=options)
        if module.params['tags'] is not None:
            kwargs['tags'] = module.params['tags']
        msg = api.Monitor.update(**kwargs)

        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
            # Nothing effectively changed; keep the task idempotent.
            module.exit_json(changed=False, msg=msg)
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception as e:  # was py2-only 'except Exception, e' syntax
        module.fail_json(msg=str(e))
+
+
def install_monitor(module):
    """Create the monitor if it does not exist yet, otherwise update it in place."""
    p = module.params
    options = {
        "silenced": p['silenced'],
        "notify_no_data": module.boolean(p['notify_no_data']),
        "no_data_timeframe": p['no_data_timeframe'],
        "timeout_h": p['timeout_h'],
        "renotify_interval": p['renotify_interval'],
        "escalation_message": p['escalation_message'],
        "notify_audit": module.boolean(p['notify_audit']),
        "locked": module.boolean(p['locked']),
    }

    # Thresholds only apply to service checks (with a default when unset)
    # and to metric alerts (only when explicitly provided).
    if p['type'] == "service check":
        options["thresholds"] = p['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
    if p['type'] == "metric alert" and p['thresholds'] is not None:
        options["thresholds"] = p['thresholds']

    existing = _get_monitor(module)
    if existing:
        _update_monitor(module, existing, options)
    else:
        _post_monitor(module, options)
+
+
def delete_monitor(module):
    """Delete the named monitor; no-op (changed=False) when it does not exist."""
    monitor = _get_monitor(module)
    if not monitor:
        module.exit_json(changed=False)
    try:
        msg = api.Monitor.delete(monitor['id'])
        module.exit_json(changed=True, msg=msg)
    except Exception as e:  # was py2-only 'except Exception, e' syntax
        module.fail_json(msg=str(e))
+
+
def mute_monitor(module):
    """Mute the named monitor, optionally restricted to the given 'silenced' scopes."""
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif monitor['options']['silenced']:
        # Datadog does not allow editing the silenced scopes of a muted alert.
        module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
    elif (module.params['silenced'] is not None
          and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
        # Every requested scope is already muted; nothing to do.
        module.exit_json(changed=False)
    try:
        if module.params['silenced'] is None or module.params['silenced'] == "":
            msg = api.Monitor.mute(id=monitor['id'])
        else:
            msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
        module.exit_json(changed=True, msg=msg)
    except Exception as e:  # was py2-only 'except Exception, e' syntax
        module.fail_json(msg=str(e))
+
+
def unmute_monitor(module):
    """Unmute the named monitor; no-op (changed=False) when it is not muted."""
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif not monitor['options']['silenced']:
        module.exit_json(changed=False)
    try:
        msg = api.Monitor.unmute(monitor['id'])
        module.exit_json(changed=True, msg=msg)
    except Exception as e:  # was py2-only 'except Exception, e' syntax
        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/honeybadger_deployment.py b/lib/ansible/modules/extras/monitoring/honeybadger_deployment.py
new file mode 100644
index 0000000000..3a6d2df7c8
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/honeybadger_deployment.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+version_added: "2.2"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ environment:
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ default: None
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ default: None
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ required: false
+ default: None
+ url:
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- honeybadger_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+ revision=b6826b8
+ repo=git@github.com:user/repo.git
+'''
+
+RETURN = '''# '''
+
+import urllib
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import *
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Notify Honeybadger.io of a deployment via its deploys API."""
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Map module options onto the form fields the deploys API expects;
    # optional fields are only sent when the user supplied them.
    field_map = (
        ("environment", "deploy[environment]"),
        ("user", "deploy[local_username]"),
        ("repo", "deploy[repository]"),
        ("revision", "deploy[revision]"),
    )
    params = {}
    for opt, field in field_map:
        if module.params[opt]:
            params[field] = module.params[opt]
    params["api_key"] = module.params["token"]

    url = module.params.get('url')

    # In check mode, report success without contacting the service.
    if module.check_mode:
        module.exit_json(changed=True)

    try:
        data = urllib.urlencode(params)
        response, info = fetch_url(module, url, data=data)
    except Exception:
        e = get_exception()
        module.fail_json(msg='Unable to notify Honeybadger: %s' % e)
    else:
        if info['status'] == 200:
            module.exit_json(changed=True)
        else:
            module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/extras/monitoring/librato_annotation.py b/lib/ansible/modules/extras/monitoring/librato_annotation.py
new file mode 100644
index 0000000000..f174bda0ea
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/librato_annotation.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: create an annotation in librato
+description:
+ - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
+version_added: "1.6"
+author: "Seth Edwards (@sedward)"
+requirements: []
+options:
+ user:
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ description:
+ - Librato account api key
+ required: true
+ name:
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ description:
+ - The description contains extra meta-data about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ description:
+            - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ description:
+            - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ description:
+ - See examples
+ required: true
+'''
+
+EXAMPLES = '''
+# Create a simple annotation event with a source
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ source: 'foo.bar'
+ description: 'This is a detailed description of the config change'
+
+# Create an annotation that includes a link
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: 'code.deploy'
+ title: 'app code deploy'
+ description: 'this is a detailed description of a deployment'
+ links:
+ - { rel: 'example', href: 'http://www.example.com/deploy' }
+
+# Create an annotation with a start_time and end_time
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: 'maintenance'
+ title: 'Maintenance window'
+ description: 'This is a detailed description of maintenance'
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
def post_annotation(module):
    """POST the annotation described by the module parameters to the Librato API.

    Exits the module with the API response on success; fails on any
    non-200 HTTP reply.
    """
    name = module.params['name']
    url = 'https://metrics-api.librato.com/v1/annotations/%s' % name

    params = {'title': module.params['title']}
    # Optional fields are only included when the user supplied them.
    for key in ('source', 'description', 'start_time', 'end_time', 'links'):
        if module.params[key] is not None:
            params[key] = module.params[key]

    json_body = module.jsonify(params)
    headers = {'Content-Type': 'application/json'}

    # Hack: fetch_url reads basic-auth credentials from these module params.
    module.params['url_username'] = module.params['user']
    module.params['url_password'] = module.params['api_key']

    response, info = fetch_url(module, url, data=json_body, headers=headers)
    if info['status'] != 200:
        # Bug fix: the previous code referenced an undefined name 'e' here
        # (reason=e.reason), raising NameError instead of reporting the
        # HTTP failure; report fetch_url's own error message instead.
        module.fail_json(msg="Request Failed", reason=info.get('msg', ''))
    response = response.read()
    module.exit_json(changed=True, annotation=response)
+
def main():
    """Entry point: declare the module arguments and post the annotation."""
    module = AnsibleModule(
        argument_spec=dict(
            user=dict(required=True),
            # no_log keeps the API key out of syslog/journal output.
            api_key=dict(required=True, no_log=True),
            name=dict(required=False),
            title=dict(required=True),
            source=dict(required=False),
            description=dict(required=False),
            start_time=dict(required=False, default=None, type='int'),
            # Bug fix: this was spelled 'require=False', a typo that
            # AnsibleModule silently ignored.
            end_time=dict(required=False, default=None, type='int'),
            links=dict(type='list')
        )
    )

    post_annotation(module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/logentries.py b/lib/ansible/modules/extras/monitoring/logentries.py
new file mode 100644
index 0000000000..a347afd84c
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/logentries.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl <ivan@app.io>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to LogEntries in realtime
+version_added: "1.6"
+options:
+ path:
+ description:
+ - path to a log file
+ required: true
+ state:
+ description:
+ - following state of the log
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ name:
+ description:
+ - name of the log
+ required: false
+ logtype:
+ description:
+ - type of the log
+ required: false
+
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log
+- logentries: path=/var/log/nginx/error.log state=absent
+'''
+
def query_log_status(module, le_path, path, state="present"):
    """Return True if the LogEntries agent reports *path* as followed."""
    if state != "present":
        return False
    # 'le followed <path>' exits 0 when the log is already followed.
    rc, out, err = module.run_command("%s followed %s" % (le_path, path))
    return rc == 0
+
def follow_log(module, le_path, logs, name=None, logtype=None):
    """Follow each log in *logs* that is not already followed.

    Exits the module with changed=True and a count of newly followed logs,
    or changed=False when every log was already followed.
    """
    followed_count = 0

    for log in logs:
        if query_log_status(module, le_path, log):
            continue

        # In check mode, report that a change would happen without doing it.
        if module.check_mode:
            module.exit_json(changed=True)

        cmd = [le_path, 'follow', log]
        if name:
            cmd.extend(['--name', name])
        if logtype:
            cmd.extend(['--type', logtype])
        # Bug fix: pass the argv list directly instead of joining it into a
        # string; run_command re-splits strings on whitespace, which mangled
        # paths and names containing spaces.
        rc, out, err = module.run_command(cmd)

        # Verify the agent actually picked the log up.
        if not query_log_status(module, le_path, log):
            module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))

        followed_count += 1

    if followed_count > 0:
        module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))

    module.exit_json(changed=False, msg="log(s) already followed")
+
def unfollow_log(module, le_path, logs):
    """Unfollow each log in *logs* that is currently followed.

    Exits the module with changed=True and a count of removed logs, or
    changed=False when none of the logs were followed.
    """
    removed_count = 0

    # Loop log by log so a failure can report exactly which one broke.
    for log in logs:
        # Query first to keep the task idempotent.
        if not query_log_status(module, le_path, log):
            continue

        if module.check_mode:
            module.exit_json(changed=True)
        rc, out, err = module.run_command([le_path, 'rm', log])

        if query_log_status(module, le_path, log):
            module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))

        removed_count += 1

    if removed_count > 0:
        # Bug fix: message previously said "package(s)" — copy-paste from a
        # package-manager module; these are logs.
        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)

    module.exit_json(changed=False, msg="log(s) already unfollowed")
+
def main():
    """Entry point: follow or unfollow the configured log path(s)."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True),
            state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
            name=dict(required=False, default=None, type='str'),
            logtype=dict(required=False, default=None, type='str', aliases=['type'])
        ),
        supports_check_mode=True
    )

    le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
    params = module.params

    # 'path' may name several logs separated by commas; drop empty entries.
    logs = [log for log in params["path"].split(",") if log]

    state = params["state"]
    if state in ("present", "followed"):
        follow_log(module, le_path, logs, name=params['name'], logtype=params['logtype'])
    elif state in ("absent", "unfollowed"):
        unfollow_log(module, le_path, logs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/logicmonitor.py b/lib/ansible/modules/extras/monitoring/logicmonitor.py
new file mode 100644
index 0000000000..8d35f3bfbb
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/logicmonitor.py
@@ -0,0 +1,2169 @@
+#!/usr/bin/python
+
+"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
+ Copyright (C) 2015 LogicMonitor
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
+
+import datetime
+import os
+import platform
+import socket
+import sys
+import types
+import urllib
+
# True when a usable JSON library was imported; presumably checked by the
# module entry point before doing any work — verify against main().
HAS_LIB_JSON = True
try:
    import json
    # Detect the python-json library which is incompatible
    # Look for simplejson if that's the case
    try:
        # The incompatible python-json package exposes loads/dumps as
        # non-function objects; the stdlib json module provides plain
        # functions, so this isinstance check tells them apart.
        if (
            not isinstance(json.loads, types.FunctionType) or
            not isinstance(json.dumps, types.FunctionType)
        ):
            raise ImportError
    except AttributeError:
        raise ImportError
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # No JSON library is available to serialize a proper module
        # response, so emit the error payload as a hand-written JSON string.
        print(
            '\n{"msg": "Error: ansible requires the stdlib json or ' +
            'simplejson module, neither was found!", "failed": true}'
        )
        HAS_LIB_JSON = False
    except SyntaxError:
        print(
            '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
            'being for a different python version", "failed": true}'
        )
        HAS_LIB_JSON = False
+
+RETURN = '''
+---
+success:
+ description: flag indicating that execution was successful
+ returned: success
+ type: boolean
+ sample: True
+...
+'''
+
+
+DOCUMENTATION = '''
+---
+module: logicmonitor
+short_description: Manage your LogicMonitor account through Ansible Playbooks
+description:
+ - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
+ - This module manages hosts, host groups, and collectors within your LogicMonitor account.
+version_added: "2.2"
+author: Ethan Culler-Mayeno, Jeff Wozniak
+notes:
+ - You must have an existing LogicMonitor account for this module to function.
+requirements: ["An existing LogicMonitor account", "Linux"]
+options:
+ target:
+ description:
+ - The type of LogicMonitor object you wish to manage.
+ - "Collector: Perform actions on a LogicMonitor collector"
+ - NOTE You should use Ansible service modules such as 'service' or 'supervisorctl' for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove.
+ - "Host: Perform actions on a host device"
+ - "Hostgroup: Perform actions on a LogicMonitor host group"
+ - NOTE Host and Hostgroup tasks should always be performed via local_action. There are no benefits to running these tasks on the remote host and doing so will typically cause problems.
+ required: true
+ default: null
+    choices: ['collector', 'host', 'datasource', 'hostgroup']
+ action:
+ description:
+ - The action you wish to perform on target
+ - "Add: Add an object to your LogicMonitor account"
+ - "Remove: Remove an object from your LogicMonitor account"
+ - "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account"
+ - "SDT: Schedule downtime for an object in your LogicMonitor account"
+ required: true
+ default: null
+ choices: ['add', 'remove', 'update', 'sdt']
+ company:
+ description:
+ - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes"
+ required: true
+ default: null
+ user:
+ description:
+ - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user
+ required: true
+ default: null
+ password:
+ description:
+ - The password of the specified LogicMonitor user
+ required: true
+ default: null
+ collector:
+ description:
+ - The fully qualified domain name of a collector in your LogicMonitor account.
+ - This is required for the creation of a LogicMonitor host (target=host action=add)
+ - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt)
+ required: false
+ default: null
+ hostname:
+ description:
+ - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage.
+ - Optional for managing hosts (target=host)
+ required: false
+ default: 'hostname -f'
+ displayname:
+ description:
+ - The display name of a host in your LogicMonitor account or the desired display name of a device to manage.
+ - Optional for managing hosts (target=host)
+ required: false
+ default: 'hostname -f'
+ description:
+ description:
+ - The long text description of the object in your LogicMonitor account
+ - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update)
+ required: false
+ default: ""
+ properties:
+ description:
+ - A dictionary of properties to set on the LogicMonitor host or host group.
+ - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update)
+      - This parameter will add or update existing properties in your LogicMonitor account.
+ required: false
+ default: {}
+ groups:
+ description:
+ - A list of groups that the host should be a member of.
+ - Optional for managing hosts (target=host; action=add or action=update)
+ required: false
+ default: []
+ id:
+ description:
+ - ID of the datasource to target
+ - Required for management of LogicMonitor datasources (target=datasource)
+ required: false
+ default: null
+ fullpath:
+ description:
+ - The fullpath of the host group object you would like to manage
+ - Recommend running on a single Ansible host
+ - Required for management of LogicMonitor host groups (target=hostgroup)
+ required: false
+ default: null
+ alertenable:
+ description:
+ - A boolean flag to turn alerting on or off for an object
+ - Optional for managing all hosts (action=add or action=update)
+ required: false
+ default: true
+ choices: [true, false]
+ starttime:
+ description:
+ - The time that the Scheduled Down Time (SDT) should begin
+ - Optional for managing SDT (action=sdt)
+ - Y-m-d H:M
+ required: false
+ default: Now
+ duration:
+ description:
+ - The duration (minutes) of the Scheduled Down Time (SDT)
+ - Optional for putting an object into SDT (action=sdt)
+ required: false
+ default: 30
+...
+'''
+EXAMPLES = '''
+ # example of adding a new LogicMonitor collector to these devices
+ ---
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy/verify LogicMonitor collectors
+ become: yes
+ logicmonitor:
+ target=collector
+ action=add
+ company={{ company }}
+ user={{ user }}
+ password={{ password }}
+
+ #example of adding a list of hosts into monitoring
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy LogicMonitor Host
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector='mycompany-Collector'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ groups="/servers/production,/datacenter1"
+ properties="{'snmp.community':'secret','dc':'1', 'type':'prod'}"
+
+ #example of putting a datasource in SDT
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT a datasource
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=datasource
+ action=sdt
+ id='123'
+ duration=3000
+ starttime='2017-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ #example of creating a hostgroup
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Create a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=add
+ fullpath='/servers/development'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'type':'dev'}"
+
+ #example of putting a list of hosts into SDT
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=sdt
+ duration=3000
+ starttime='2016-11-10 09:08'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+
+ #example of putting a host group in SDT
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=sdt
+ fullpath='/servers/development'
+ duration=3000
+ starttime='2017-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ #example of updating a list of hosts
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Update a list of hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=update
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+ groups="/servers/production,/datacenter5"
+ properties="{'snmp.community':'commstring','dc':'5'}"
+
+ #example of updating a hostgroup
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Update a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=update
+ fullpath='/servers/development'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'hg', 'type':'dev', 'status':'test'}"
+
+ #example of removing a list of hosts from monitoring
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+
+ #example of removing a host group
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor development servers hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/servers/development'
+ - name: Remove LogicMonitor servers hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/servers'
+ - name: Remove LogicMonitor datacenter1 hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/datacenter1'
+ - name: Remove LogicMonitor datacenter5 hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/datacenter5'
+
+  ### example of removing a LogicMonitor collector from these devices
+ ---
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor collectors
+ become: yes
+ logicmonitor:
+ target=collector
+ action=remove
+ company={{ company }}
+ user={{ user }}
+ password={{ password }}
+
+ #complete example
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Create a host group
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=add
+ fullpath='/servers/production/database'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring'}"
+ - name: SDT a host group
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=sdt
+ fullpath='/servers/production/web'
+ duration=3000
+ starttime='2012-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy/verify LogicMonitor collectors
+ logicmonitor:
+ target: collector
+ action: add
+ company: {{ company }}
+ user: {{ user }}
+ password: {{ password }}
+ - name: Place LogicMonitor collectors into 30 minute Scheduled downtime
+ logicmonitor: target=collector action=sdt company={{ company }}
+ user={{ user }} password={{ password }}
+ - name: Deploy LogicMonitor Host
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector=agent1.ethandev.com
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'dc':'1'}"
+ groups="/servers/production/collectors, /datacenter1"
+
+ - hosts: database-servers
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: deploy logicmonitor hosts
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector=monitoring.dev.com
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'type':'db', 'dc':'1'}"
+ groups="/servers/production/database, /datacenter1"
+ - name: schedule 5 hour downtime for 2012-11-10 09:08
+ local_action: >
+ logicmonitor
+ target=host
+ action=sdt
+ duration=3000
+ starttime='2012-11-10 09:08'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+'''
+
+
class LogicMonitor(object):
    """Base class for talking to the LogicMonitor santaba API.

    Holds the account credentials and provides the HTTP helpers (rpc, do)
    plus lookups (collectors, hosts, host groups) shared by the
    Collector/Host/Hostgroup subclasses. Subclasses are expected to set
    self.change before the fail() helper is used.
    """

    def __init__(self, module, **params):
        """Store credentials and shared state.

        module -- AnsibleModule instance (used for debug/exit_json/fail_json)
        params -- must contain "company", "user" and "password"
        """
        self.__version__ = "1.0-python"
        self.module = module
        self.module.debug("Instantiating LogicMonitor object")

        self.check_mode = False
        self.company = params["company"]
        self.user = params["user"]
        self.password = params["password"]
        self.fqdn = socket.getfqdn()
        self.lm_url = "logicmonitor.com/santaba"
        # Tag outgoing requests so LogicMonitor can identify this client.
        self.__version__ = self.__version__ + "-ansible-module"

    def rpc(self, action, params):
        """Make a call to the LogicMonitor RPC library
        and return the raw JSON response string.

        Fails the module when the server reports an authentication
        failure (status 403 in the payload) or on any I/O error.
        """
        self.module.debug("Running LogicMonitor.rpc")

        param_str = urllib.urlencode(params)
        creds = urllib.urlencode(
            {"c": self.company,
             "u": self.user,
             "p": self.password})

        if param_str:
            param_str = param_str + "&"

        param_str = param_str + creds

        try:
            url = ("https://" + self.company + "." + self.lm_url +
                   "/rpc/" + action + "?" + param_str)

            # Set custom LogicMonitor header with version
            headers = {"X-LM-User-Agent": self.__version__}

            # Set headers
            f = open_url(url, headers=headers)

            raw = f.read()
            resp = json.loads(raw)
            if resp["status"] == 403:
                self.module.debug("Authentication failed.")
                self.fail(msg="Error: " + resp["errmsg"])
            else:
                return raw
        except IOError:
            self.fail(msg="Error: Unknown exception making RPC call")

    def do(self, action, params):
        """Make a call to the LogicMonitor server \"do\" endpoint and
        return the raw response (used e.g. to download installer binaries)."""
        self.module.debug("Running LogicMonitor.do...")

        param_str = urllib.urlencode(params)
        creds = (urllib.urlencode(
            {"c": self.company,
             "u": self.user,
             "p": self.password}))

        if param_str:
            param_str = param_str + "&"
        param_str = param_str + creds

        try:
            self.module.debug("Attempting to open URL: " +
                              "https://" + self.company + "." + self.lm_url +
                              "/do/" + action + "?" + param_str)
            f = open_url(
                "https://" + self.company + "." + self.lm_url +
                "/do/" + action + "?" + param_str)
            return f.read()
        except IOError:
            self.fail("Unknown exception opening URL")

    def get_collectors(self):
        """Return the account's collector list, or fail the module."""
        self.module.debug("Running LogicMonitor.get_collectors...")

        self.module.debug("Making RPC call to 'getAgents'")
        resp = self.rpc("getAgents", {})
        resp_json = json.loads(resp)

        # '== 200' rather than 'is 200': identity comparison with an int
        # literal only works by accident of CPython small-int caching.
        if resp_json["status"] == 200:
            self.module.debug("RPC call succeeded")
            return resp_json["data"]
        else:
            self.fail(msg=resp)

    def get_host_by_hostname(self, hostname, collector):
        """Return the host object matching hostname on the given collector,
        or None when no collector is given or no host matches."""
        self.module.debug("Running LogicMonitor.get_host_by_hostname...")

        self.module.debug("Looking for hostname " + hostname)
        self.module.debug("Making RPC call to 'getHosts'")
        hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))

        if collector:
            if hostlist_json["status"] == 200:
                self.module.debug("RPC call succeeded")

                hosts = hostlist_json["data"]["hosts"]

                self.module.debug(
                    "Looking for host matching: hostname " + hostname +
                    " and collector " + str(collector["id"]))

                # A host is identified by the (hostname, collector) pair:
                # the same hostname may exist under several collectors.
                for host in hosts:
                    if (host["hostName"] == hostname and
                            host["agentId"] == collector["id"]):

                        self.module.debug("Host match found")
                        return host
                self.module.debug("No host match found")
                return None
            else:
                self.module.debug("RPC call failed")
                self.module.debug(hostlist_json)
                return None
        else:
            self.module.debug("No collector specified")
            return None

    def get_host_by_displayname(self, displayname):
        """Return the host object matching the display name, or None."""
        self.module.debug("Running LogicMonitor.get_host_by_displayname...")

        self.module.debug("Looking for displayname " + displayname)
        self.module.debug("Making RPC call to 'getHost'")
        host_json = (json.loads(self.rpc("getHost",
                                         {"displayName": displayname})))

        if host_json["status"] == 200:
            self.module.debug("RPC call succeeded")
            return host_json["data"]
        else:
            self.module.debug("RPC call failed")
            self.module.debug(host_json)
            return None

    def get_collector_by_description(self, description):
        """Return the collector whose description equals the given FQDN
        (description), or None when no collector matches."""
        self.module.debug(
            "Running LogicMonitor.get_collector_by_description..."
        )

        collector_list = self.get_collectors()
        if collector_list is not None:
            # Plain concatenation: the original "{0}" + description never
            # substituted the placeholder into the message.
            self.module.debug("Looking for collector with description " +
                              description)
            for collector in collector_list:
                if collector["description"] == description:
                    self.module.debug("Collector match found")
                    return collector
        self.module.debug("No collector match found")
        return None

    def get_group(self, fullpath):
        """Return the host group object matching the given path, or None."""
        self.module.debug("Running LogicMonitor.get_group...")

        self.module.debug("Making RPC call to getHostGroups")
        resp = json.loads(self.rpc("getHostGroups", {}))

        if resp["status"] == 200:
            self.module.debug("RPC called succeeded")
            groups = resp["data"]

            self.module.debug("Looking for group matching " + fullpath)
            for group in groups:
                # API paths have no leading slash; module input may.
                if group["fullPath"] == fullpath.lstrip('/'):
                    self.module.debug("Group match found")
                    return group

            self.module.debug("No group match found")
            return None
        else:
            self.module.debug("RPC call failed")
            self.module.debug(resp)

        return None

    def create_group(self, fullpath):
        """Recursively create a path of host groups.
        Returns the id of the newly created hostgroup"""
        self.module.debug("Running LogicMonitor.create_group...")

        res = self.get_group(fullpath)
        if res:
            self.module.debug("Group " + fullpath + " exists.")
            return res["id"]

        if fullpath == "/":
            # Root group always exists with id 1.
            self.module.debug("Specified group is root. Doing nothing.")
            return 1
        else:
            self.module.debug("Creating group named " + fullpath)
            self.module.debug("System changed")
            self.change = True

            if self.check_mode:
                self.exit(changed=True)

            parentpath, name = fullpath.rsplit('/', 1)
            parentgroup = self.get_group(parentpath)

            parentid = 1

            if parentpath == "":
                parentid = 1
            elif parentgroup:
                parentid = parentgroup["id"]
            else:
                # Parent missing: recurse to create the chain above us.
                parentid = self.create_group(parentpath)

            h = None

            # Determine if we're creating a group from host or hostgroup class
            if hasattr(self, '_build_host_group_hash'):
                h = self._build_host_group_hash(
                    fullpath,
                    self.description,
                    self.properties,
                    self.alertenable)
                h["name"] = name
                h["parentId"] = parentid
            else:
                h = {"name": name,
                     "parentId": parentid,
                     "alertEnable": True,
                     "description": ""}

            self.module.debug("Making RPC call to 'addHostGroup'")
            resp = json.loads(
                self.rpc("addHostGroup", h))

            if resp["status"] == 200:
                self.module.debug("RPC call succeeded")
                return resp["data"]["id"]
            elif resp["errmsg"] == "The record already exists":
                # Lost a race with a concurrent creation; look it up instead.
                self.module.debug("The hostgroup already exists")
                group = self.get_group(fullpath)
                return group["id"]
            else:
                self.module.debug("RPC call failed")
                self.fail(
                    msg="Error: unable to create new hostgroup \"" +
                        name + "\".\n" + resp["errmsg"])

    def fail(self, msg):
        """Fail the module run, reporting whether anything changed so far."""
        self.module.fail_json(msg=msg, changed=self.change, failed=True)

    def exit(self, changed):
        """Exit the module run successfully with the given changed flag."""
        # str() conversion: 'changed' is a bool and cannot be concatenated
        # to a str directly (the original raised TypeError here).
        self.module.debug("Changed: " + str(changed))
        self.module.exit_json(changed=changed, success=True)

    def output_info(self, info):
        """Exit the module, registering `info` as Ansible facts."""
        self.module.debug("Registering properties as Ansible facts")
        self.module.exit_json(changed=False, ansible_facts=info)
+
+
class Collector(LogicMonitor):
    """Manages a LogicMonitor collector on the local (Linux) machine:
    registration, installer download, install/uninstall and SDT."""

    def __init__(self, params, module=None):
        """Initializer for the LogicMonitor Collector object"""
        self.change = False
        self.params = params

        LogicMonitor.__init__(self, module, **params)
        self.module.debug("Instantiating Collector object")

        if self.params['description']:
            self.description = self.params['description']
        else:
            # Default to this machine's FQDN as the collector description.
            self.description = self.fqdn

        self.info = self._get()
        self.installdir = "/usr/local/logicmonitor"
        self.platform = platform.system()
        self.is_64bits = sys.maxsize > 2**32
        self.duration = self.params['duration']
        self.starttime = self.params['starttime']

        if self.info is None:
            self.id = None
        else:
            self.id = self.info["id"]

    def create(self):
        """Idempotent function to make sure that there is
        a running collector installed and registered"""
        self.module.debug("Running Collector.create...")

        self._create()
        self.get_installer_binary()
        self.install()

    def remove(self):
        """Idempotent function to make sure that there is
        not a running collector installed and registered"""
        self.module.debug("Running Collector.destroy...")

        self._unreigster()
        self.uninstall()

    def get_installer_binary(self):
        """Download the LogicMonitor collector installer binary.

        Returns the path to the installer file (downloading it first
        when it does not exist yet), or fails the module."""
        self.module.debug("Running Collector.get_installer_binary...")

        arch = 32

        if self.is_64bits:
            self.module.debug("64 bit system")
            arch = 64
        else:
            self.module.debug("32 bit system")

        if self.platform == "Linux" and self.id is not None:
            self.module.debug("Platform is Linux")
            self.module.debug("Agent ID is " + str(self.id))

            installfilepath = (self.installdir +
                               "/logicmonitorsetup" +
                               str(self.id) + "_" + str(arch) +
                               ".bin")

            self.module.debug("Looking for existing installer at " +
                              installfilepath)
            if not os.path.isfile(installfilepath):
                self.module.debug("No previous installer found")
                self.module.debug("System changed")
                self.change = True

                if self.check_mode:
                    self.exit(changed=True)

                self.module.debug("Downloading installer file")
                # attempt to create the install dir before download
                self.module.run_command("mkdir " + self.installdir)

                try:
                    # 'with' guarantees the handle is closed even when the
                    # download or write fails (the original's 'f.closed' was
                    # a no-op attribute access and leaked the handle).
                    with open(installfilepath, "w") as f:
                        installer = (self.do("logicmonitorsetup",
                                             {"id": self.id,
                                              "arch": arch}))
                        f.write(installer)
                except Exception:
                    self.fail(msg="Unable to open installer file for writing")
                # Return the path after a fresh download too (the original
                # fell through and returned None here).
                return installfilepath
            else:
                self.module.debug("Collector installer already exists")
                return installfilepath

        elif self.id is None:
            self.fail(
                msg="Error: There is currently no collector " +
                    "associated with this device. To download " +
                    " the installer, first create a collector " +
                    "for this device.")
        elif self.platform != "Linux":
            self.fail(
                msg="Error: LogicMonitor Collector must be " +
                    "installed on a Linux device.")
        else:
            self.fail(
                msg="Error: Unable to retrieve the installer from the server")

    def install(self):
        """Execute the LogicMonitor installer if not
        already installed"""
        self.module.debug("Running Collector.install...")

        if self.platform == "Linux":
            self.module.debug("Platform is Linux")

            installer = self.get_installer_binary()

            if self.info is None:
                self.module.debug("Retrieving collector information")
                self.info = self._get()

            # Presence of the agent directory marks an existing install.
            if not os.path.exists(self.installdir + "/agent"):
                self.module.debug("System changed")
                self.change = True

                if self.check_mode:
                    self.exit(changed=True)

                self.module.debug("Setting installer file permissions")
                os.chmod(installer, 0o744)

                self.module.debug("Executing installer")
                ret_code, out, err = self.module.run_command(installer + " -y")

                if ret_code != 0:
                    self.fail(msg="Error: Unable to install collector: " + err)
                else:
                    self.module.debug("Collector installed successfully")
            else:
                self.module.debug("Collector already installed")
        else:
            self.fail(
                msg="Error: LogicMonitor Collector must be " +
                    "installed on a Linux device")

    def uninstall(self):
        """Uninstall LogicMontitor collector from the system"""
        self.module.debug("Running Collector.uninstall...")

        uninstallfile = self.installdir + "/agent/bin/uninstall.pl"

        if os.path.isfile(uninstallfile):
            self.module.debug("Collector uninstall file exists")
            self.module.debug("System changed")
            self.change = True

            if self.check_mode:
                self.exit(changed=True)

            self.module.debug("Running collector uninstaller")
            ret_code, out, err = self.module.run_command(uninstallfile)

            if ret_code != 0:
                self.fail(
                    msg="Error: Unable to uninstall collector: " + err)
            else:
                self.module.debug("Collector successfully uninstalled")
        else:
            # Agent dir present but uninstaller missing: a broken install
            # we cannot clean up; fail rather than leave it half-removed.
            if os.path.exists(self.installdir + "/agent"):
                (self.fail(
                    msg="Unable to uninstall LogicMonitor " +
                        "Collector. Can not find LogicMonitor " +
                        "uninstaller."))

    def sdt(self):
        """Create a scheduled down time
        (maintenance window) for this host"""
        self.module.debug("Running Collector.sdt...")

        self.module.debug("System changed")
        self.change = True

        if self.check_mode:
            self.exit(changed=True)

        duration = self.duration
        starttime = self.starttime
        offsetstart = starttime

        if starttime:
            self.module.debug("Start time specified")
            start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
            offsetstart = start
        else:
            self.module.debug("No start time specified. Using default.")
            start = datetime.datetime.utcnow()

            # Use user UTC offset
            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
            accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))

            if accountresp["status"] == 200:
                self.module.debug("RPC call succeeded")

                offset = accountresp["data"]["offset"]
                offsetstart = start + datetime.timedelta(0, offset)
            else:
                self.fail(msg="Error: Unable to retrieve timezone offset")

        offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)

        # NOTE(review): month-1 / endMonth-1 suggests the SDT API expects
        # 0-based month values — confirm against the LogicMonitor RPC docs.
        h = {"agentId": self.id,
             "type": 1,
             "notifyCC": True,
             "year": offsetstart.year,
             "month": offsetstart.month-1,
             "day": offsetstart.day,
             "hour": offsetstart.hour,
             "minute": offsetstart.minute,
             "endYear": offsetend.year,
             "endMonth": offsetend.month-1,
             "endDay": offsetend.day,
             "endHour": offsetend.hour,
             "endMinute": offsetend.minute}

        self.module.debug("Making RPC call to 'setAgentSDT'")
        resp = json.loads(self.rpc("setAgentSDT", h))

        if resp["status"] == 200:
            self.module.debug("RPC call succeeded")
            return resp["data"]
        else:
            self.module.debug("RPC call failed")
            self.fail(msg=resp["errmsg"])

    def site_facts(self):
        """Output current properties information for the Collector"""
        self.module.debug("Running Collector.site_facts...")

        if self.info:
            self.module.debug("Collector exists")
            # NOTE(review): get_properties is not defined on Collector or
            # LogicMonitor in this file's visible portion — presumably
            # provided elsewhere; verify.
            props = self.get_properties(True)

            self.output_info(props)
        else:
            self.fail(msg="Error: Collector doesn't exist.")

    def _get(self):
        """Returns a JSON object representing this collector"""
        self.module.debug("Running Collector._get...")
        collector_list = self.get_collectors()

        if collector_list is not None:
            self.module.debug("Collectors returned")
            for collector in collector_list:
                if collector["description"] == self.description:
                    return collector
        else:
            self.module.debug("No collectors returned")
            return None

    def _create(self):
        """Create a new collector in the associated
        LogicMonitor account"""
        self.module.debug("Running Collector._create...")

        if self.platform == "Linux":
            self.module.debug("Platform is Linux")
            ret = self.info or self._get()

            if ret is None:
                self.change = True
                self.module.debug("System changed")

                if self.check_mode:
                    self.exit(changed=True)

                h = {"autogen": True,
                     "description": self.description}

                self.module.debug("Making RPC call to 'addAgent'")
                create = (json.loads(self.rpc("addAgent", h)))

                # '== 200' rather than 'is 200': identity comparison with
                # an int literal is a CPython implementation accident.
                if create["status"] == 200:
                    self.module.debug("RPC call succeeded")
                    self.info = create["data"]
                    self.id = create["data"]["id"]
                    return create["data"]
                else:
                    self.fail(msg=create["errmsg"])
            else:
                self.info = ret
                self.id = ret["id"]
                return ret
        else:
            self.fail(
                msg="Error: LogicMonitor Collector must be " +
                    "installed on a Linux device.")

    def _unreigster(self):
        """Delete this collector from the associated
        LogicMonitor account.

        (Name typo preserved for backward compatibility with callers.)"""
        self.module.debug("Running Collector._unreigster...")

        if self.info is None:
            self.module.debug("Retrieving collector information")
            self.info = self._get()

        if self.info is not None:
            self.module.debug("Collector found")
            self.module.debug("System changed")
            self.change = True

            if self.check_mode:
                self.exit(changed=True)

            self.module.debug("Making RPC call to 'deleteAgent'")
            delete = json.loads(self.rpc("deleteAgent",
                                         {"id": self.id}))

            if delete["status"] == 200:
                self.module.debug("RPC call succeeded")
                return delete
            else:
                # The collector couldn't unregister. Start the service again
                self.module.debug("Error unregistering collecting. " +
                                  delete["errmsg"])
                self.fail(msg=delete["errmsg"])
        else:
            self.module.debug("Collector not found")
            return None
+
+
+class Host(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+ self.collector = None
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Host object")
+
+ if self.params["hostname"]:
+ self.module.debug("Hostname is " + self.params["hostname"])
+ self.hostname = self.params['hostname']
+ else:
+ self.module.debug("No hostname specified. Using " + self.fqdn)
+ self.hostname = self.fqdn
+
+ if self.params["displayname"]:
+ self.module.debug("Display name is " + self.params["displayname"])
+ self.displayname = self.params['displayname']
+ else:
+ self.module.debug("No display name specified. Using " + self.fqdn)
+ self.displayname = self.fqdn
+
+ # Attempt to host information via display name of host name
+ self.module.debug("Attempting to find host by displayname " +
+ self.displayname)
+ info = self.get_host_by_displayname(self.displayname)
+
+ if info is not None:
+ self.module.debug("Host found by displayname")
+ # Used the host information to grab the collector description
+ # if not provided
+ if (not hasattr(self.params, "collector") and
+ "agentDescription" in info):
+ self.module.debug("Setting collector from host response. " +
+ "Collector " + info["agentDescription"])
+ self.params["collector"] = info["agentDescription"]
+ else:
+ self.module.debug("Host not found by displayname")
+
+ # At this point, a valid collector description is required for success
+ # Check that the description exists or fail
+ if self.params["collector"]:
+ self.module.debug(
+ "Collector specified is " +
+ self.params["collector"]
+ )
+ self.collector = (self.get_collector_by_description(
+ self.params["collector"]))
+ else:
+ self.fail(msg="No collector specified.")
+
+ # If the host wasn't found via displayname, attempt by hostname
+ if info is None:
+ self.module.debug("Attempting to find host by hostname " +
+ self.hostname)
+ info = self.get_host_by_hostname(self.hostname, self.collector)
+
+ self.info = info
+ self.properties = self.params["properties"]
+ self.description = self.params["description"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+ self.alertenable = self.params["alertenable"]
+ if self.params["groups"] is not None:
+ self.groups = self._strip_groups(self.params["groups"])
+ else:
+ self.groups = None
+
+ def create(self):
+ """Idemopotent function to create if missing,
+ update if changed, or skip"""
+ self.module.debug("Running Host.create...")
+
+ self.update()
+
+ def get_properties(self):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Host.get_properties...")
+
+ if self.info:
+ self.module.debug("Making RPC call to 'getHostProperties'")
+ properties_json = (json.loads(self.rpc("getHostProperties",
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("Error: there was an issue retrieving the " +
+ "host properties")
+ self.module.debug(properties_json["errmsg"])
+
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug(
+ "Unable to find LogicMonitor host which matches " +
+ self.displayname + " (" + self.hostname + ")"
+ )
+ return None
+
+ def set_properties(self, propertyhash):
+ """update the host to have the properties
+ contained in the property hash"""
+ self.module.debug("Running Host.set_properties...")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Assigning property hash to host object")
+ self.properties = propertyhash
+
+ def add(self):
+ """Add this device to monitoring
+ in your LogicMonitor account"""
+ self.module.debug("Running Host.add...")
+
+ if self.collector and not self.info:
+ self.module.debug("Host not registered. Registering.")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = self._build_host_hash(
+ self.hostname,
+ self.displayname,
+ self.collector,
+ self.description,
+ self.groups,
+ self.properties,
+ self.alertenable)
+
+ self.module.debug("Making RPC call to 'addHost'")
+ resp = json.loads(self.rpc("addHost", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ return resp["errmsg"]
+ elif self.collector is None:
+ self.fail(msg="Specified collector doesn't exist")
+ else:
+ self.module.debug("Host already registered")
+
+ def update(self):
+ """This method takes changes made to this host
+ and applies them to the corresponding host
+ in your LogicMonitor account."""
+ self.module.debug("Running Host.update...")
+
+ if self.info:
+ self.module.debug("Host already registed")
+ if self.is_changed():
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = (self._build_host_hash(
+ self.hostname,
+ self.displayname,
+ self.collector,
+ self.description,
+ self.groups,
+ self.properties,
+ self.alertenable))
+ h["id"] = self.info["id"]
+ h["opType"] = "replace"
+
+ self.module.debug("Making RPC call to 'updateHost'")
+ resp = json.loads(self.rpc("updateHost", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg="Error: unable to update the host.")
+ else:
+ self.module.debug(
+ "Host properties match supplied properties. " +
+ "No changes to make."
+ )
+ return self.info
+ else:
+ self.module.debug("Host not registed. Registering")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ return self.add()
+
+ def remove(self):
+ """Remove this host from your LogicMonitor account"""
+ self.module.debug("Running Host.remove...")
+
+ if self.info:
+ self.module.debug("Host registered")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteHost'")
+ resp = json.loads(self.rpc("deleteHost",
+ {"hostId": self.info["id"],
+ "deleteFromSystem": True,
+ "hostGroupId": 1}))
+
+ if resp["status"] == 200:
+ self.module.debug(resp)
+ self.module.debug("RPC call succeeded")
+ return resp
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ self.fail(msg=resp["errmsg"])
+
+ else:
+ self.module.debug("Host not registered")
+
+ def is_changed(self):
+ """Return true if the host doesn't
+ match the LogicMonitor account"""
+ self.module.debug("Running Host.is_changed")
+
+ ignore = ['system.categories', 'snmp.version']
+
+ hostresp = self.get_host_by_displayname(self.displayname)
+
+ if hostresp is None:
+ hostresp = self.get_host_by_hostname(self.hostname, self.collector)
+
+ if hostresp:
+ self.module.debug("Comparing simple host properties")
+ if hostresp["alertEnable"] != self.alertenable:
+ return True
+
+ if hostresp["description"] != self.description:
+ return True
+
+ if hostresp["displayedAs"] != self.displayname:
+ return True
+
+ if (self.collector and
+ hasattr(self.collector, "id") and
+ hostresp["agentId"] != self.collector["id"]):
+ return True
+
+ self.module.debug("Comparing groups.")
+ if self._compare_groups(hostresp) is True:
+ return True
+
+ propresp = self.get_properties()
+
+ if propresp:
+ self.module.debug("Comparing properties.")
+ if self._compare_props(propresp, ignore) is True:
+ return True
+ else:
+ self.fail(
+ msg="Error: Unknown error retrieving host properties")
+
+ return False
+ else:
+ self.fail(msg="Error: Unknown error retrieving host information")
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Host.sdt...")
+ if self.info:
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offset = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = (json.loads(self.rpc("getTimeZoneSetting", {})))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostId": self.info["id"],
+ "type": 1,
+ "year": offsetstart.year,
+ "month": offsetstart.month - 1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month - 1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setHostSDT'")
+ resp = (json.loads(self.rpc("setHostSDT", h)))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+ def site_facts(self):
+ """Output current properties information for the Host"""
+ self.module.debug("Running Host.site_facts...")
+
+ if self.info:
+ self.module.debug("Host exists")
+ props = self.get_properties()
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+ def _build_host_hash(self,
+ hostname,
+ displayname,
+ collector,
+ description,
+ groups,
+ properties,
+ alertenable):
+ """Return a property formated hash for the
+ creation of a host using the rpc function"""
+ self.module.debug("Running Host._build_host_hash...")
+
+ h = {}
+ h["hostName"] = hostname
+ h["displayedAs"] = displayname
+ h["alertEnable"] = alertenable
+
+ if collector:
+ self.module.debug("Collector property exists")
+ h["agentId"] = collector["id"]
+ else:
+ self.fail(
+ msg="Error: No collector found. Unable to build host hash.")
+
+ if description:
+ h["description"] = description
+
+ if groups is not None and groups is not []:
+ self.module.debug("Group property exists")
+ groupids = ""
+
+ for group in groups:
+ groupids = groupids + str(self.create_group(group)) + ","
+
+ h["hostGroupIds"] = groupids.rstrip(',')
+
+ if properties is not None and properties is not {}:
+ self.module.debug("Properties hash exists")
+ propnum = 0
+ for key, value in properties.iteritems():
+ h["propName" + str(propnum)] = key
+ h["propValue" + str(propnum)] = value
+ propnum = propnum + 1
+
+ return h
+
+ def _verify_property(self, propname):
+ """Check with LogicMonitor server to
+ verify property is unchanged"""
+ self.module.debug("Running Host._verify_property...")
+
+ if self.info:
+ self.module.debug("Host is registered")
+ if propname not in self.properties:
+ self.module.debug("Property " + propname + " does not exist")
+ return False
+ else:
+ self.module.debug("Property " + propname + " exists")
+ h = {"hostId": self.info["id"],
+ "propName0": propname,
+ "propValue0": self.properties[propname]}
+
+ self.module.debug("Making RCP call to 'verifyProperties'")
+ resp = json.loads(self.rpc('verifyProperties', h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["match"]
+ else:
+ self.fail(
+ msg="Error: unable to get verification " +
+ "from server.\n%s" % resp["errmsg"])
+ else:
+ self.fail(
+ msg="Error: Host doesn't exist. Unable to verify properties")
+
+ def _compare_groups(self, hostresp):
+ """Function to compare the host's current
+ groups against provided groups"""
+ self.module.debug("Running Host._compare_groups")
+
+ g = []
+ fullpathinids = hostresp["fullPathInIds"]
+ self.module.debug("Building list of groups")
+ for path in fullpathinids:
+ if path != []:
+ h = {'hostGroupId': path[-1]}
+
+ hgresp = json.loads(self.rpc("getHostGroup", h))
+
+ if (hgresp["status"] == 200 and
+ hgresp["data"]["appliesTo"] == ""):
+
+ g.append(path[-1])
+
+ if self.groups is not None:
+ self.module.debug("Comparing group lists")
+ for group in self.groups:
+ groupjson = self.get_group(group)
+
+ if groupjson is None:
+ self.module.debug("Group mismatch. No result.")
+ return True
+ elif groupjson['id'] not in g:
+ self.module.debug("Group mismatch. ID doesn't exist.")
+ return True
+ else:
+ g.remove(groupjson['id'])
+
+ if g != []:
+ self.module.debug("Group mismatch. New ID exists.")
+ return True
+ self.module.debug("Groups match")
+
+ def _compare_props(self, propresp, ignore):
+ """Function to compare the host's current
+ properties against provided properties"""
+ self.module.debug("Running Host._compare_props...")
+ p = {}
+
+ self.module.debug("Creating list of properties")
+ for prop in propresp:
+ if prop["name"] not in ignore:
+ if ("*******" in prop["value"] and
+ self._verify_property(prop["name"])):
+ p[prop["name"]] = self.properties[prop["name"]]
+ else:
+ p[prop["name"]] = prop["value"]
+
+ self.module.debug("Comparing properties")
+ # Iterate provided properties and compare to received properties
+ for prop in self.properties:
+ if (prop not in p or
+ p[prop] != self.properties[prop]):
+ self.module.debug("Properties mismatch")
+ return True
+ self.module.debug("Properties match")
+
+ def _strip_groups(self, groups):
+ """Function to strip whitespace from group list.
+ This function provides the user some flexibility when
+ formatting group arguments """
+ self.module.debug("Running Host._strip_groups...")
+ return map(lambda x: x.strip(), groups)
+
+
+class Datasource(LogicMonitor):
+    # Represents a single LogicMonitor host datasource instance.
+    # Only the "sdt" action is supported for this target.
+
+    def __init__(self, params, module=None):
+        """Initializor for the LogicMonitor Datasource object"""
+        self.change = False
+        self.params = params
+
+        LogicMonitor.__init__(self, module, **params)
+        self.module.debug("Instantiating Datasource object")
+
+        # id: the hostDataSourceId of the datasource instance to manage
+        self.id = self.params["id"]
+        # starttime/duration: window for scheduled down time (sdt)
+        self.starttime = self.params["starttime"]
+        self.duration = self.params["duration"]
+
+    def sdt(self):
+        """Create a scheduled down time
+        (maintenance window) for this host"""
+        self.module.debug("Running Datasource.sdt...")
+
+        self.module.debug("System changed")
+        self.change = True
+
+        if self.check_mode:
+            self.exit(changed=True)
+
+        duration = self.duration
+        starttime = self.starttime
+        # Placeholder; overwritten in both branches below
+        offsetstart = starttime
+
+        if starttime:
+            self.module.debug("Start time specified")
+            # starttime is expected in 'YYYY-MM-DD HH:MM' format
+            start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+            offsetstart = start
+        else:
+            self.module.debug("No start time specified. Using default.")
+            start = datetime.datetime.utcnow()
+
+            # Use user UTC offset so the window is expressed in the
+            # account's configured time zone
+            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+            accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+            if accountresp["status"] == 200:
+                self.module.debug("RPC call succeeded")
+
+                offset = accountresp["data"]["offset"]
+                offsetstart = start + datetime.timedelta(0, offset)
+            else:
+                self.fail(msg="Error: Unable to retrieve timezone offset")
+
+        # duration is in minutes; timedelta takes (days, seconds)
+        offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+        # The LogicMonitor API uses 0-based months, hence "month - 1"
+        h = {"hostDataSourceId": self.id,
+             "type": 1,
+             "notifyCC": True,
+             "year": offsetstart.year,
+             "month": offsetstart.month-1,
+             "day": offsetstart.day,
+             "hour": offsetstart.hour,
+             "minute": offsetstart.minute,
+             "endYear": offsetend.year,
+             "endMonth": offsetend.month-1,
+             "endDay": offsetend.day,
+             "endHour": offsetend.hour,
+             "endMinute": offsetend.minute}
+
+        self.module.debug("Making RPC call to 'setHostDataSourceSDT'")
+        resp = json.loads(self.rpc("setHostDataSourceSDT", h))
+
+        if resp["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            return resp["data"]
+        else:
+            self.module.debug("RPC call failed")
+            self.fail(msg=resp["errmsg"])
+
+
+class Hostgroup(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Hostgroup object")
+
+ self.fullpath = self.params["fullpath"]
+ self.info = self.get_group(self.fullpath)
+ self.properties = self.params["properties"]
+ self.description = self.params["description"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+ self.alertenable = self.params["alertenable"]
+
+ def create(self):
+ """Wrapper for self.update()"""
+ self.module.debug("Running Hostgroup.create...")
+ self.update()
+
+ def get_properties(self, final=False):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Hostgroup.get_properties...")
+
+ if self.info:
+ self.module.debug("Group found")
+
+ self.module.debug("Making RPC call to 'getHostGroupProperties'")
+ properties_json = json.loads(self.rpc(
+ "getHostGroupProperties",
+ {'hostGroupId': self.info["id"],
+ "finalResult": final}))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug("Group not found")
+ return None
+
+ def set_properties(self, propertyhash):
+ """Update the host to have the properties
+ contained in the property hash"""
+ self.module.debug("Running Hostgroup.set_properties")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Assigning property has to host object")
+ self.properties = propertyhash
+
+ def add(self):
+ """Idempotent function to ensure that the host
+ group exists in your LogicMonitor account"""
+ self.module.debug("Running Hostgroup.add")
+
+ if self.info is None:
+ self.module.debug("Group doesn't exist. Creating.")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.create_group(self.fullpath)
+ self.info = self.get_group(self.fullpath)
+
+ self.module.debug("Group created")
+ return self.info
+ else:
+ self.module.debug("Group already exists")
+
+ def update(self):
+ """Idempotent function to ensure the host group settings
+ (alertenable, properties, etc) in the
+ LogicMonitor account match the current object."""
+ self.module.debug("Running Hostgroup.update")
+
+ if self.info:
+ if self.is_changed():
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = self._build_host_group_hash(
+ self.fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["opType"] = "replace"
+
+ if self.fullpath != "/":
+ h["id"] = self.info["id"]
+
+ self.module.debug("Making RPC call to 'updateHostGroup'")
+ resp = json.loads(self.rpc("updateHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg="Error: Unable to update the " +
+ "host.\n" + resp["errmsg"])
+ else:
+ self.module.debug(
+ "Group properties match supplied properties. " +
+ "No changes to make"
+ )
+ return self.info
+ else:
+ self.module.debug("Group doesn't exist. Creating.")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ return self.add()
+
+ def remove(self):
+ """Idempotent function to ensure the host group
+ does not exist in your LogicMonitor account"""
+ self.module.debug("Running Hostgroup.remove...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteHostGroup'")
+ resp = json.loads(self.rpc("deleteHostGroup",
+ {"hgId": self.info["id"]}))
+
+ if resp["status"] == 200:
+ self.module.debug(resp)
+ self.module.debug("RPC call succeeded")
+ return resp
+ elif resp["errmsg"] == "No such group":
+ self.module.debug("Group doesn't exist")
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ self.fail(msg=resp["errmsg"])
+ else:
+ self.module.debug("Group doesn't exist")
+
+ def is_changed(self):
+ """Return true if the host doesn't match
+ the LogicMonitor account"""
+ self.module.debug("Running Hostgroup.is_changed...")
+
+ ignore = []
+ group = self.get_group(self.fullpath)
+ properties = self.get_properties()
+
+ if properties is not None and group is not None:
+ self.module.debug("Comparing simple group properties")
+ if (group["alertEnable"] != self.alertenable or
+ group["description"] != self.description):
+
+ return True
+
+ p = {}
+
+ self.module.debug("Creating list of properties")
+ for prop in properties:
+ if prop["name"] not in ignore:
+ if ("*******" in prop["value"] and
+ self._verify_property(prop["name"])):
+
+ p[prop["name"]] = (
+ self.properties[prop["name"]])
+ else:
+ p[prop["name"]] = prop["value"]
+
+ self.module.debug("Comparing properties")
+ if set(p) != set(self.properties):
+ return True
+ else:
+ self.module.debug("No property information received")
+ return False
+
+ def sdt(self, duration=30, starttime=None):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Hostgroup.sdt")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offset = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostGroupId": self.info["id"],
+ "type": 1,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to setHostGroupSDT")
+ resp = json.loads(self.rpc("setHostGroupSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+ def site_facts(self):
+ """Output current properties information for the Hostgroup"""
+ self.module.debug("Running Hostgroup.site_facts...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Group doesn't exit.")
+
+ def _build_host_group_hash(self,
+ fullpath,
+ description,
+ properties,
+ alertenable):
+ """Return a property formated hash for the
+ creation of a hostgroup using the rpc function"""
+ self.module.debug("Running Hostgroup._build_host_hash")
+
+ h = {}
+ h["alertEnable"] = alertenable
+
+ if fullpath == "/":
+ self.module.debug("Group is root")
+ h["id"] = 1
+ else:
+ self.module.debug("Determining group path")
+ parentpath, name = fullpath.rsplit('/', 1)
+ parent = self.get_group(parentpath)
+
+ h["name"] = name
+
+ if parent:
+ self.module.debug("Parent group " +
+ str(parent["id"]) + " found.")
+ h["parentID"] = parent["id"]
+ else:
+ self.module.debug("No parent group found. Using root.")
+ h["parentID"] = 1
+
+ if description:
+ self.module.debug("Description property exists")
+ h["description"] = description
+
+ if properties != {}:
+ self.module.debug("Properties hash exists")
+ propnum = 0
+ for key, value in properties.iteritems():
+ h["propName" + str(propnum)] = key
+ h["propValue" + str(propnum)] = value
+ propnum = propnum + 1
+
+ return h
+
+ def _verify_property(self, propname):
+ """Check with LogicMonitor server
+ to verify property is unchanged"""
+ self.module.debug("Running Hostgroup._verify_property")
+
+ if self.info:
+ self.module.debug("Group exists")
+ if propname not in self.properties:
+ self.module.debug("Property " + propname + " does not exist")
+ return False
+ else:
+ self.module.debug("Property " + propname + " exists")
+ h = {"hostGroupId": self.info["id"],
+ "propName0": propname,
+ "propValue0": self.properties[propname]}
+
+ self.module.debug("Making RCP call to 'verifyProperties'")
+ resp = json.loads(self.rpc('verifyProperties', h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["match"]
+ else:
+ self.fail(
+ msg="Error: unable to get verification " +
+ "from server.\n%s" % resp["errmsg"])
+ else:
+ self.fail(
+ msg="Error: Group doesn't exist. Unable to verify properties")
+
+
+def selector(module):
+ """Figure out which object and which actions
+ to take given the right parameters"""
+
+ if module.params["target"] == "collector":
+ target = Collector(module.params, module)
+ elif module.params["target"] == "host":
+ # Make sure required parameter collector is specified
+ if ((module.params["action"] == "add" or
+ module.params["displayname"] is None) and
+ module.params["collector"] is None):
+ module.fail_json(
+ msg="Parameter 'collector' required.")
+
+ target = Host(module.params, module)
+ elif module.params["target"] == "datasource":
+ # Validate target specific required parameters
+ if module.params["id"] is not None:
+ # make sure a supported action was specified
+ if module.params["action"] == "sdt":
+ target = Datasource(module.params, module)
+ else:
+ errmsg = ("Error: Unexpected action \"" +
+ module.params["action"] + "\" was specified.")
+ module.fail_json(msg=errmsg)
+
+ elif module.params["target"] == "hostgroup":
+ # Validate target specific required parameters
+ if module.params["fullpath"] is not None:
+ target = Hostgroup(module.params, module)
+ else:
+ module.fail_json(
+ msg="Parameter 'fullpath' required for target 'hostgroup'")
+ else:
+ module.fail_json(
+ msg="Error: Unexpected target \"" + module.params["target"] +
+ "\" was specified.")
+
+ if module.params["action"].lower() == "add":
+ action = target.create
+ elif module.params["action"].lower() == "remove":
+ action = target.remove
+ elif module.params["action"].lower() == "sdt":
+ action = target.sdt
+ elif module.params["action"].lower() == "update":
+ action = target.update
+ else:
+ errmsg = ("Error: Unexpected action \"" + module.params["action"] +
+ "\" was specified.")
+ module.fail_json(msg=errmsg)
+
+ action()
+ module.exit_json(changed=target.change)
+
+
+def main():
+    """Entry point: declare the module interface and hand off to selector().
+
+    Validates that a JSON implementation is available before doing any
+    work, since every RPC response is JSON-decoded."""
+    # Supported object types this module can manage
+    TARGETS = [
+        "collector",
+        "host",
+        "datasource",
+        "hostgroup"]
+
+    # Supported operations on those targets
+    ACTIONS = [
+        "add",
+        "remove",
+        "sdt",
+        "update"]
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            # NOTE(review): default=None alongside required=True is
+            # redundant -- required parameters never use their default.
+            target=dict(required=True, default=None, choices=TARGETS),
+            action=dict(required=True, default=None, choices=ACTIONS),
+            company=dict(required=True, default=None),
+            user=dict(required=True, default=None),
+            password=dict(required=True, default=None, no_log=True),
+
+            collector=dict(required=False, default=None),
+            hostname=dict(required=False, default=None),
+            displayname=dict(required=False, default=None),
+            id=dict(required=False, default=None),
+            description=dict(required=False, default=""),
+            fullpath=dict(required=False, default=None),
+            starttime=dict(required=False, default=None),
+            duration=dict(required=False, default=30),
+            properties=dict(required=False, default={}, type="dict"),
+            groups=dict(required=False, default=[], type="list"),
+            # NOTE(review): BOOLEANS choices is the legacy idiom;
+            # type='bool' is the modern equivalent but would change the
+            # parameter's accepted/normalized values.
+            alertenable=dict(required=False, default="true", choices=BOOLEANS)
+        ),
+        supports_check_mode=True
+    )
+
+    if HAS_LIB_JSON is not True:
+        module.fail_json(msg="Unable to load JSON library")
+
+    selector(module)
+
+
+# Bottom-of-file wildcard imports are the historical Ansible module
+# convention: module_utils code is inlined by Ansible at execution time.
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.urls import open_url
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lib/ansible/modules/extras/monitoring/logicmonitor_facts.py b/lib/ansible/modules/extras/monitoring/logicmonitor_facts.py
new file mode 100644
index 0000000000..cc91ca6122
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/logicmonitor_facts.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+
+"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
+ Copyright (C) 2015 LogicMonitor
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
+
+
+import socket
+import sys
+import types
+import urllib
+
# Flag consumed by main(): False means no usable JSON implementation was
# found and the module must fail_json() immediately after construction.
HAS_LIB_JSON = True
try:
    import json
    # Detect the python-json library which is incompatible
    # Look for simplejson if that's the case
    try:
        # The stdlib json (and simplejson) expose loads/dumps as plain
        # functions; the legacy python-json package does not.
        if (
            not isinstance(json.loads, types.FunctionType) or
            not isinstance(json.dumps, types.FunctionType)
        ):
            raise ImportError
    except AttributeError:
        # Module lacks loads/dumps entirely -- equally unusable.
        raise ImportError
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # AnsibleModule was never constructed, so emit raw JSON on stdout
        # so the Ansible runner can still report a structured failure.
        print(
            '\n{"msg": "Error: ansible requires the stdlib json or ' +
            'simplejson module, neither was found!", "failed": true}'
        )
        HAS_LIB_JSON = False
    except SyntaxError:
        print(
            '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
            'being for a different python version", "failed": true}'
        )
        HAS_LIB_JSON = False
+
+
DOCUMENTATION = '''
---
module: logicmonitor_facts
short_description: Collect facts about LogicMonitor objects
description:
  - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
  - This module collects facts about hosts and host groups within your LogicMonitor account.
version_added: "2.2"
author: Ethan Culler-Mayeno, Jeff Wozniak
notes:
  - You must have an existing LogicMonitor account for this module to function.
requirements: ["An existing LogicMonitor account", "Linux"]
options:
  target:
    description:
      - The LogicMonitor object you wish to manage.
    required: true
    default: null
    choices: ['host', 'hostgroup']
  company:
    description:
      - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes"
    required: true
    default: null
  user:
    description:
      - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user
    required: true
    default: null
  password:
    description:
      - The password for the chosen LogicMonitor User
      - If an md5 hash is used, the digest flag must be set to true
    required: true
    default: null
  collector:
    description:
      - The fully qualified domain name of a collector in your LogicMonitor account.
      - This is optional for querying a LogicMonitor host when a displayname is specified
      - This is required for querying a LogicMonitor host when a displayname is not specified
    required: false
    default: null
  hostname:
    description:
      - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to add into monitoring.
      - Required for managing hosts (target=host)
    required: false
    default: 'hostname -f'
  displayname:
    description:
      - The display name of a host in your LogicMonitor account or the desired display name of a device to add into monitoring.
    required: false
    default: 'hostname -f'
  fullpath:
    description:
      - The fullpath of the hostgroup object you would like to manage
      - Recommend running on a single ansible host
      - Required for management of LogicMonitor host groups (target=hostgroup)
    required: false
    default: null
...
'''

EXAMPLES = '''
#example of querying a list of hosts
```
---
- hosts: hosts
  user: root
  vars:
    company: 'yourcompany'
    user: 'Luigi'
    password: 'ImaLuigi,number1!'
  tasks:
    - name: query a list of hosts
      # All tasks should use local_action
      local_action:
        logicmonitor_facts:
          target: host
          company: '{{ company }}'
          user: '{{ user }}'
          password: '{{ password }}'
```

#example of querying a hostgroup
```
---
- hosts: somemachine.superheroes.com
  user: root
  vars:
    company: 'yourcompany'
    user: 'mario'
    password: 'itsame.Mario!'
  tasks:
    - name: query a host group
      # All tasks should use local_action
      local_action:
        logicmonitor_facts:
          target: hostgroup
          fullpath: '/servers/production'
          company: '{{ company }}'
          user: '{{ user }}'
          password: '{{ password }}'
```
'''


RETURN = '''
---
    ansible_facts:
        description: LogicMonitor properties set for the specified object
        returned: success
        type: list of dicts containing name/value pairs
        example: >
            {
                "name": "dc",
                "value": "1"
            },
            {
                "name": "type",
                "value": "prod"
            },
            {
                "name": "system.categories",
                "value": ""
            },
            {
                "name": "snmp.community",
                "value": "********"
            }
...
'''
+
+
class LogicMonitor(object):
    """Base class wrapping the LogicMonitor RPC API.

    Stores the account credentials and provides the lookup helpers
    shared by the Host and Hostgroup fact collectors.
    """

    def __init__(self, module, **params):
        self.__version__ = "1.0-python"
        self.module = module
        self.module.debug("Instantiating LogicMonitor object")

        self.check_mode = False
        self.company = params["company"]
        self.user = params["user"]
        self.password = params["password"]
        # FQDN of the machine running the module; subclasses fall back to
        # it when no hostname/displayname parameter is given.
        self.fqdn = socket.getfqdn()
        self.lm_url = "logicmonitor.com/santaba"
        self.__version__ = self.__version__ + "-ansible-module"

    def rpc(self, action, params):
        """Make a call to the LogicMonitor RPC library
        and return the raw response body (a JSON string)."""
        self.module.debug("Running LogicMonitor.rpc")

        param_str = urllib.urlencode(params)
        creds = urllib.urlencode(
            {"c": self.company,
             "u": self.user,
             "p": self.password})

        if param_str:
            param_str = param_str + "&"

        param_str = param_str + creds

        try:
            url = ("https://" + self.company + "." + self.lm_url +
                   "/rpc/" + action + "?" + param_str)

            # Set custom LogicMonitor header with version
            headers = {"X-LM-User-Agent": self.__version__}

            # Set headers
            f = open_url(url, headers=headers)

            raw = f.read()
            resp = json.loads(raw)
            if resp["status"] == 403:
                self.module.debug("Authentication failed.")
                self.fail(msg="Error: " + resp["errmsg"])
            else:
                return raw
        except IOError:
            self.fail(msg="Error: Unknown exception making RPC call")

    def get_collectors(self):
        """Returns a JSON object containing a list of
        LogicMonitor collectors"""
        self.module.debug("Running LogicMonitor.get_collectors...")

        self.module.debug("Making RPC call to 'getAgents'")
        resp = self.rpc("getAgents", {})
        resp_json = json.loads(resp)

        # BUG FIX: was "is 200" -- identity comparison of an int only
        # happens to work via CPython's small-int cache; use equality.
        if resp_json["status"] == 200:
            self.module.debug("RPC call succeeded")
            return resp_json["data"]
        else:
            self.fail(msg=resp)

    def get_host_by_hostname(self, hostname, collector):
        """Returns a host object for the host matching the
        specified hostname, or None when no match (or no collector)."""
        self.module.debug("Running LogicMonitor.get_host_by_hostname...")

        self.module.debug("Looking for hostname " + hostname)
        self.module.debug("Making RPC call to 'getHosts'")
        hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))

        if collector:
            if hostlist_json["status"] == 200:
                self.module.debug("RPC call succeeded")

                hosts = hostlist_json["data"]["hosts"]

                self.module.debug(
                    "Looking for host matching: hostname " + hostname +
                    " and collector " + str(collector["id"]))

                # A hostname is only unique per collector, so both must match.
                for host in hosts:
                    if (host["hostName"] == hostname and
                        host["agentId"] == collector["id"]):

                        self.module.debug("Host match found")
                        return host
                self.module.debug("No host match found")
                return None
            else:
                self.module.debug("RPC call failed")
                self.module.debug(hostlist_json)
        else:
            self.module.debug("No collector specified")
            return None

    def get_host_by_displayname(self, displayname):
        """Returns a host object for the host matching the
        specified display name, or None if not found."""
        self.module.debug("Running LogicMonitor.get_host_by_displayname...")

        self.module.debug("Looking for displayname " + displayname)
        self.module.debug("Making RPC call to 'getHost'")
        host_json = (json.loads(self.rpc("getHost",
                                         {"displayName": displayname})))

        if host_json["status"] == 200:
            self.module.debug("RPC call succeeded")
            return host_json["data"]
        else:
            self.module.debug("RPC call failed")
            self.module.debug(host_json)
            return None

    def get_collector_by_description(self, description):
        """Returns a JSON collector object for the collector
        matching the specified FQDN (description), or None."""
        self.module.debug(
            "Running LogicMonitor.get_collector_by_description..."
        )

        collector_list = self.get_collectors()
        if collector_list is not None:
            self.module.debug("Looking for collector with description " +
                              description)
            for collector in collector_list:
                if collector["description"] == description:
                    self.module.debug("Collector match found")
                    return collector
        self.module.debug("No collector match found")
        return None

    def get_group(self, fullpath):
        """Returns a JSON group object for the group matching the
        specified path, or None if not found."""
        self.module.debug("Running LogicMonitor.get_group...")

        self.module.debug("Making RPC call to getHostGroups")
        resp = json.loads(self.rpc("getHostGroups", {}))

        if resp["status"] == 200:
            self.module.debug("RPC called succeeded")
            groups = resp["data"]

            self.module.debug("Looking for group matching " + fullpath)
            # The API reports paths without a leading slash.
            for group in groups:
                if group["fullPath"] == fullpath.lstrip('/'):
                    self.module.debug("Group match found")
                    return group

            self.module.debug("No group match found")
            return None
        else:
            self.module.debug("RPC call failed")
            self.module.debug(resp)

            return None

    def create_group(self, fullpath):
        """Recursively create a path of host groups.
        Returns the id of the newly created hostgroup"""
        self.module.debug("Running LogicMonitor.create_group...")

        res = self.get_group(fullpath)
        if res:
            self.module.debug("Group " + fullpath + " exists.")
            return res["id"]

        if fullpath == "/":
            # The root group always exists and has id 1.
            self.module.debug("Specified group is root. Doing nothing.")
            return 1
        else:
            self.module.debug("Creating group named " + fullpath)
            self.module.debug("System changed")
            self.change = True

            if self.check_mode:
                self.exit(changed=True)

            parentpath, name = fullpath.rsplit('/', 1)
            parentgroup = self.get_group(parentpath)

            parentid = 1

            if parentpath == "":
                parentid = 1
            elif parentgroup:
                parentid = parentgroup["id"]
            else:
                # Parent missing too: recurse to create the whole chain.
                parentid = self.create_group(parentpath)

            h = None

            # Determine if we're creating a group from host or hostgroup class
            if hasattr(self, '_build_host_group_hash'):
                h = self._build_host_group_hash(
                    fullpath,
                    self.description,
                    self.properties,
                    self.alertenable)
                h["name"] = name
                h["parentId"] = parentid
            else:
                h = {"name": name,
                     "parentId": parentid,
                     "alertEnable": True,
                     "description": ""}

            self.module.debug("Making RPC call to 'addHostGroup'")
            resp = json.loads(
                self.rpc("addHostGroup", h))

            if resp["status"] == 200:
                self.module.debug("RPC call succeeded")
                return resp["data"]["id"]
            elif resp["errmsg"] == "The record already exists":
                # Lost a race with a concurrent creator; reuse the group.
                self.module.debug("The hostgroup already exists")
                group = self.get_group(fullpath)
                return group["id"]
            else:
                self.module.debug("RPC call failed")
                self.fail(
                    msg="Error: unable to create new hostgroup \"" + name +
                        "\".\n" + resp["errmsg"])

    def fail(self, msg):
        # self.change is set by the subclasses' __init__.
        self.module.fail_json(msg=msg, changed=self.change)

    def exit(self, changed):
        # BUG FIX: "changed" is a bool; concatenating it to a str raised
        # TypeError before, so it is stringified explicitly now.
        self.module.debug("Changed: " + str(changed))
        self.module.exit_json(changed=changed)

    def output_info(self, info):
        """Exit the module, registering info as Ansible facts."""
        self.module.debug("Registering properties as Ansible facts")
        self.module.exit_json(changed=False, ansible_facts=info)
+
+
class Host(LogicMonitor):
    """Fact collector for a single monitored device."""

    def __init__(self, params, module=None):
        """Initializer for the LogicMonitor host object.

        Locates the host via display name (preferred) or via
        hostname + collector, caching the matching host record
        (or None) in self.info.
        """
        self.change = False
        self.params = params
        self.collector = None

        LogicMonitor.__init__(self, module, **self.params)
        self.module.debug("Instantiating Host object")

        if self.params["hostname"]:
            self.module.debug("Hostname is " + self.params["hostname"])
            self.hostname = self.params['hostname']
        else:
            self.module.debug("No hostname specified. Using " + self.fqdn)
            self.hostname = self.fqdn

        if self.params["displayname"]:
            self.module.debug("Display name is " + self.params["displayname"])
            self.displayname = self.params['displayname']
        else:
            self.module.debug("No display name specified. Using " + self.fqdn)
            self.displayname = self.fqdn

        # Attempt to find host information via display name or host name.
        self.module.debug("Attempting to find host by displayname " +
                          self.displayname)
        info = self.get_host_by_displayname(self.displayname)

        if info is not None:
            self.module.debug("Host found by displayname")
            # Use the host record to fill in the collector description
            # only when the user did not supply one.
            # BUG FIX: this used hasattr() on a dict, which never sees the
            # dict's keys, so the condition was always true and a
            # user-supplied collector was silently overwritten.
            if (self.params["collector"] is None and
               "agentDescription" in info):
                self.module.debug("Setting collector from host response. " +
                                  "Collector " + info["agentDescription"])
                self.params["collector"] = info["agentDescription"]
        else:
            self.module.debug("Host not found by displayname")

        # At this point, a valid collector description is required for success
        # Check that the description exists or fail
        if self.params["collector"]:
            self.module.debug("Collector specified is " +
                              self.params["collector"])
            self.collector = (self.get_collector_by_description(
                self.params["collector"]))
        else:
            self.fail(msg="No collector specified.")

        # If the host wasn't found via displayname, attempt by hostname
        if info is None:
            self.module.debug("Attempting to find host by hostname " +
                              self.hostname)
            info = self.get_host_by_hostname(self.hostname, self.collector)

        self.info = info

    def get_properties(self):
        """Returns a hash of the properties associated with this
        LogicMonitor host, or None when the host was not found."""
        self.module.debug("Running Host.get_properties...")

        if self.info:
            self.module.debug("Making RPC call to 'getHostProperties'")
            properties_json = (json.loads(self.rpc("getHostProperties",
                                                   {'hostId': self.info["id"],
                                                    "filterSystemProperties": True})))

            if properties_json["status"] == 200:
                self.module.debug("RPC call succeeded")
                return properties_json["data"]
            else:
                self.module.debug("Error: there was an issue retrieving the " +
                                  "host properties")
                self.module.debug(properties_json["errmsg"])

                self.fail(msg=properties_json["status"])
        else:
            self.module.debug(
                "Unable to find LogicMonitor host which matches " +
                self.displayname + " (" + self.hostname + ")"
            )
            return None

    def site_facts(self):
        """Output current properties information for the Host"""
        self.module.debug("Running Host.site_facts...")

        if self.info:
            self.module.debug("Host exists")
            props = self.get_properties()

            self.output_info(props)
        else:
            # Typo fix: message previously read "doesn't exit."
            self.fail(msg="Error: Host doesn't exist.")
+
+
class Hostgroup(LogicMonitor):
    """Fact collector for a LogicMonitor host group."""

    def __init__(self, params, module=None):
        """Initializer for the LogicMonitor hostgroup object.

        Looks the group up by its full path and caches the matching
        record (or None) in self.info.
        """
        self.change = False
        self.params = params

        LogicMonitor.__init__(self, module, **self.params)
        self.module.debug("Instantiating Hostgroup object")

        self.fullpath = self.params["fullpath"]
        self.info = self.get_group(self.fullpath)

    def get_properties(self, final=False):
        """Returns a hash of the properties associated with this
        LogicMonitor host group, or None when the group was not found.

        When final is True the API also resolves inherited ("final")
        property values.
        """
        self.module.debug("Running Hostgroup.get_properties...")

        if self.info:
            self.module.debug("Group found")

            self.module.debug("Making RPC call to 'getHostGroupProperties'")
            properties_json = json.loads(self.rpc(
                "getHostGroupProperties",
                {'hostGroupId': self.info["id"],
                 "finalResult": final}))

            if properties_json["status"] == 200:
                self.module.debug("RPC call succeeded")
                return properties_json["data"]
            else:
                self.module.debug("RPC call failed")
                self.fail(msg=properties_json["status"])
        else:
            self.module.debug("Group not found")
            return None

    def site_facts(self):
        """Output current properties information for the Hostgroup"""
        self.module.debug("Running Hostgroup.site_facts...")

        if self.info:
            self.module.debug("Group exists")
            props = self.get_properties(True)

            self.output_info(props)
        else:
            # Typo fix: message previously read "doesn't exit."
            self.fail(msg="Error: Group doesn't exist.")
+
+
def selector(module):
    """Dispatch to the fact collector matching the requested target."""
    target_name = module.params["target"]

    if target_name == "host":
        Host(module.params, module).site_facts()
        return

    if target_name == "hostgroup":
        # A hostgroup can only be located by its full path.
        if module.params["fullpath"] is None:
            module.fail_json(
                msg="Parameter 'fullpath' required for target 'hostgroup'")
        else:
            Hostgroup(module.params, module).site_facts()
        return

    module.fail_json(
        msg="Error: Unexpected target \"" + target_name +
        "\" was specified.")
+
+
def main():
    """Ansible entry point for logicmonitor_facts.

    Declares the module interface, verifies a JSON library is available,
    and hands the validated module off to selector().
    """
    TARGETS = [
        "host",
        "hostgroup"]

    module = AnsibleModule(
        argument_spec=dict(
            # Required parameters must not also declare a default:
            # AnsibleModule treats "required" and "default" as mutually
            # exclusive, so the defaults were dropped here.
            target=dict(required=True, choices=TARGETS),
            company=dict(required=True),
            user=dict(required=True),
            password=dict(required=True, no_log=True),

            # BUG FIX: this previously read "require=False", a misspelled
            # key that AnsibleModule silently ignores.
            collector=dict(required=False, default=None),
            hostname=dict(required=False, default=None),
            displayname=dict(required=False, default=None),
            fullpath=dict(required=False, default=None)
        ),
        supports_check_mode=True
    )

    if HAS_LIB_JSON is not True:
        module.fail_json(msg="Unable to load JSON library")

    selector(module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.urls import open_url
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/monit.py b/lib/ansible/modules/extras/monitoring/monit.py
new file mode 100644
index 0000000000..2983d5e49a
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/monit.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+import time
+
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
    - Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
  name:
    description:
      - The name of the I(monit) program/process to manage
    required: true
    default: null
  state:
    description:
      - The state of service
    required: true
    default: null
    choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
  timeout:
    description:
      - If there are pending actions for the service monitored by monit, then Ansible will check
        for up to this many seconds to verify that the requested action has been performed.
        Ansible will sleep for five seconds between each check.
    required: false
    default: 300
    version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''

EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit: name=httpd state=started
'''
+
def main():
    """Drive a monit-monitored process to the requested state.

    Reads the current state from ``monit summary``, issues the matching
    monit command (start/stop/monitor/unmonitor/restart/reload), and
    verifies the transition, waiting out transitional "pending" /
    "initializing" states up to the configured timeout.
    """
    arg_spec = dict(
        name=dict(required=True),
        timeout=dict(default=300, type='int'),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    state = module.params['state']
    timeout = module.params['timeout']

    MONIT = module.get_bin_path('monit', True)

    def status():
        """Return the status of the process in monit, or the empty string if not present."""
        rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
        for line in out.split('\n'):
            # Sample output lines:
            # Process 'name'    Running
            # Process 'name'    Running - restart pending
            parts = line.split()
            if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
                return ' '.join(parts[2:]).lower()
        # No matching "Process 'name'" line found.
        return ''

    def run_command(command):
        """Runs a monit command, and returns the new status."""
        module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
        return status()

    def wait_for_monit_to_stop_pending():
        """Fails this run if there is no status or it's pending/initializing for timeout"""
        timeout_time = time.time() + timeout
        sleep_time = 5

        running_status = status()
        while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
            if time.time() >= timeout_time:
                module.fail_json(
                    msg='waited too long for "pending", or "initiating" status to go away ({0})'.format(
                        running_status
                    ),
                    state=state
                )

            time.sleep(sleep_time)
            running_status = status()

    if state == 'reloaded':
        if module.check_mode:
            module.exit_json(changed=True)
        rc, out, err = module.run_command('%s reload' % MONIT)
        if rc != 0:
            module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
        wait_for_monit_to_stop_pending()
        module.exit_json(changed=True, name=name, state=state)

    present = status() != ''

    if not present and not state == 'present':
        module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)

    if state == 'present':
        if not present:
            if module.check_mode:
                module.exit_json(changed=True)
            # BUG FIX: the result used to be bound to the name "status",
            # shadowing the status() helper that
            # wait_for_monit_to_stop_pending() still calls below; that
            # raised "TypeError: 'str' object is not callable".
            new_status = run_command('reload')
            if new_status == '':
                wait_for_monit_to_stop_pending()
            module.exit_json(changed=True, name=name, state=state)
        module.exit_json(changed=False, name=name, state=state)

    wait_for_monit_to_stop_pending()
    running = 'running' in status()

    if running and state in ['started', 'monitored']:
        module.exit_json(changed=False, name=name, state=state)

    if running and state == 'stopped':
        if module.check_mode:
            module.exit_json(changed=True)
        new_status = run_command('stop')
        if new_status in ['not monitored'] or 'stop pending' in new_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not stopped' % name, status=new_status)

    if running and state == 'unmonitored':
        if module.check_mode:
            module.exit_json(changed=True)
        new_status = run_command('unmonitor')
        if new_status in ['not monitored'] or 'unmonitor pending' in new_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not unmonitored' % name, status=new_status)

    elif state == 'restarted':
        if module.check_mode:
            module.exit_json(changed=True)
        new_status = run_command('restart')
        if new_status in ['initializing', 'running'] or 'restart pending' in new_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not restarted' % name, status=new_status)

    elif not running and state == 'started':
        if module.check_mode:
            module.exit_json(changed=True)
        new_status = run_command('start')
        if new_status in ['initializing', 'running'] or 'start pending' in new_status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not started' % name, status=new_status)

    elif not running and state == 'monitored':
        if module.check_mode:
            module.exit_json(changed=True)
        new_status = run_command('monitor')
        if new_status not in ['not monitored']:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not monitored' % name, status=new_status)

    module.exit_json(changed=False, name=name, state=state)
+
# import module snippets
from ansible.module_utils.basic import *

# Guard the entry point so importing this module (e.g. for testing or
# documentation tooling) has no side effects -- consistent with the
# other modules in this tree.
if __name__ == "__main__":
    main()
diff --git a/lib/ansible/modules/extras/monitoring/nagios.py b/lib/ansible/modules/extras/monitoring/nagios.py
new file mode 100644
index 0000000000..689e9f0903
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/nagios.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enables/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license version 2 or any later version.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications.
+description:
+ - "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
+ - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
+ - When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
+version_added: "0.7"
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime" ]
+ host:
+ description:
+ - Host to operate on in Nagios.
+ required: false
+ default: null
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ required: false
+ default: auto-detected
+ author:
+ description:
+ - Author to leave downtime comments as.
+ Only usable with the C(downtime) action.
+ required: false
+ default: Ansible
+ comment:
+ version_added: "2.0"
+ description:
+ - Comment for C(downtime) action.
+ required: false
+ default: Scheduling downtime
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ required: false
+ default: 30
+ services:
+ description:
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ C(service) is an alias for C(services).
+ B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
+ aliases: [ "service" ]
+ required: true
+ servicegroup:
+ version_added: "2.0"
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+ B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime).
+ command:
+ description:
+ - The raw command to send to nagios, which
+ should not include the submitted time header or the line-feed
+ B(Required) option when using the C(command) action.
+ required: true
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+# set 30 minutes of apache downtime
+- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }}
+
+# schedule an hour of HOST downtime
+- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
+
+# schedule an hour of HOST downtime, with a comment describing the reason
+- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
+ comment='This host needs disciplined'
+
+# schedule downtime for ALL services on HOST
+- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }}
+
+# schedule downtime for a few services
+- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }}
+
+# set 30 minutes downtime for all services in servicegroup foo
+- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
+
+# set 30 minutes downtime for all host in servicegroup foo
+- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
+
+# delete all downtime for a given host
+- nagios: action=delete_downtime host={{ inventory_hostname }} service=all
+
+# delete all downtime for HOST with a particular comment
+- nagios: action=delete_downtime host={{ inventory_hostname }} service=host comment="Planned maintenance"
+
+# enable SMART disk alerts
+- nagios: action=enable_alerts service=smart host={{ inventory_hostname }}
+
+# "two services at once: disable httpd and nfs alerts"
+- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }}
+
+# disable HOST alerts
+- nagios: action=disable_alerts service=host host={{ inventory_hostname }}
+
+# silence ALL alerts
+- nagios: action=silence host={{ inventory_hostname }}
+
+# unsilence all alerts
+- nagios: action=unsilence host={{ inventory_hostname }}
+
+# SHUT UP NAGIOS
+- nagios: action=silence_nagios
+
+# ANNOY ME NAGIOS
+- nagios: action=unsilence_nagios
+
+# command something
+- nagios: action=command command='DISABLE_FAILURE_PREDICTION'
+'''
+
+import ConfigParser
+import types
+import time
+import os.path
+
+######################################################################
+
+
def which_cmdfile():
    """
    Auto-detect the path of the Nagios external command file (FIFO pipe).

    Scans a list of well-known nagios.cfg / icinga.cfg locations and
    returns the value of the first ``command_file`` directive found.

    Returns:
        The command-file path as a string, or None when no known config
        file exists or none of them define ``command_file``.
    """
    locations = [
        # rhel
        '/etc/nagios/nagios.cfg',
        # debian
        '/etc/nagios3/nagios.cfg',
        # older debian
        '/etc/nagios2/nagios.cfg',
        # bsd, solaris
        '/usr/local/etc/nagios/nagios.cfg',
        # groundwork it monitoring
        '/usr/local/groundwork/nagios/etc/nagios.cfg',
        # open monitoring distribution
        '/omd/sites/oppy/tmp/nagios/nagios.cfg',
        # ???
        '/usr/local/nagios/etc/nagios.cfg',
        '/usr/local/nagios/nagios.cfg',
        '/opt/nagios/etc/nagios.cfg',
        '/opt/nagios/nagios.cfg',
        # icinga on debian/ubuntu
        '/etc/icinga/icinga.cfg',
        # icinga installed from source (default location)
        '/usr/local/icinga/etc/icinga.cfg',
    ]

    for path in locations:
        if os.path.exists(path):
            # Use a context manager so the config file handle is always
            # closed (the original leaked one handle per scanned file).
            with open(path) as cfg:
                for line in cfg:
                    if line.startswith('command_file'):
                        return line.split('=')[1].strip()

    return None
+
+######################################################################
+
+
def main():
    """
    Module entry point: parse arguments, validate the per-action
    requirements, then hand off to Nagios.act().

    Required args per action:
      downtime                      = (minutes, services, host)
      delete_downtime               = (services, host)
      (un)silence                   = (host)
      (enable/disable)_alerts       = (services, host)
      servicegroup_*_downtime       = (servicegroup, minutes, host)
      command                       = (command)

    AnsibleModule verifies most of this; 'minutes', 'services',
    'servicegroup' and 'command' are checked manually below.
    """
    ACTION_CHOICES = [
        'downtime',
        'delete_downtime',
        'silence',
        'unsilence',
        'enable_alerts',
        'disable_alerts',
        'silence_nagios',
        'unsilence_nagios',
        'command',
        'servicegroup_host_downtime',
        'servicegroup_service_downtime',
    ]

    module = AnsibleModule(
        argument_spec=dict(
            # NOTE: 'required=True' with 'default=None' is contradictory;
            # a required option never uses its default, so the default
            # was dropped.
            action=dict(required=True, choices=ACTION_CHOICES),
            author=dict(default='Ansible'),
            comment=dict(default='Scheduling downtime'),
            host=dict(required=False, default=None),
            servicegroup=dict(required=False, default=None),
            minutes=dict(default=30),
            cmdfile=dict(default=which_cmdfile()),
            services=dict(default=None, aliases=['service']),
            command=dict(required=False, default=None),
        )
    )

    action = module.params['action']
    host = module.params['host']
    servicegroup = module.params['servicegroup']
    minutes = module.params['minutes']
    services = module.params['services']
    cmdfile = module.params['cmdfile']
    command = module.params['command']

    def ensure_minutes_numeric():
        # 'minutes' may arrive as a string from the playbook; fail the
        # module when it does not parse as an integer. (The original
        # also ran an isinstance check on int(minutes), which was dead
        # code: int() always returns an int.)
        try:
            int(minutes)
        except (TypeError, ValueError):
            module.fail_json(msg='invalid entry for minutes')

    # Every action except these three operates on a specific host.
    if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
        if not host:
            module.fail_json(msg='no host specified for action requiring one')

    if action == 'downtime':
        if not services:
            module.fail_json(msg='no service selected to set downtime for')
        ensure_minutes_numeric()

    if action == 'delete_downtime':
        if not services:
            module.fail_json(msg='no service selected to set downtime for')

    if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
        if not servicegroup:
            module.fail_json(msg='no servicegroup selected to set downtime for')
        ensure_minutes_numeric()

    if action in ['enable_alerts', 'disable_alerts']:
        if not services:
            module.fail_json(msg='a service is required when setting alerts')

    if action in ['command']:
        if not command:
            module.fail_json(msg='no command passed for command action')

    if not cmdfile:
        module.fail_json(msg='unable to locate nagios.cfg')

    ansible_nagios = Nagios(module, **module.params)
    if module.check_mode:
        module.exit_json(changed=True)
    else:
        ansible_nagios.act()
+ ##################################################################
+
+
+######################################################################
class Nagios(object):
    """
    Perform common tasks in Nagios related to downtime and
    notifications.

    The complete set of external commands Nagios handles is documented
    on their website:

    http://old.nagios.org/developerinfo/externalcommands/commandlist.php

    Note that in the case of `schedule_svc_downtime`,
    `enable_svc_notifications`, and `disable_svc_notifications`, the
    service argument should be passed as a list.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.action = kwargs['action']
        self.author = kwargs['author']
        self.comment = kwargs['comment']
        self.host = kwargs['host']
        self.servicegroup = kwargs['servicegroup']
        self.minutes = int(kwargs['minutes'])
        self.cmdfile = kwargs['cmdfile']
        self.command = kwargs['command']

        # 'host' and 'all' are keywords interpreted by act(); anything
        # else is a comma-separated list of service descriptions.
        if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
            self.services = kwargs['services']
        else:
            self.services = kwargs['services'].split(',')

        # Every command successfully written to the command file is
        # collected here and returned to the user by act() on exit.
        self.command_results = []

    def _now(self):
        """
        The time in seconds since 12:00:00AM Jan 1, 1970
        """
        return int(time.time())

    def _write_command(self, cmd):
        """
        Write the given command to the Nagios command file.

        Returns True on success. On IOError the module fails (and
        exits) via fail_json, so the return value is truthy whenever
        this method returns at all.

        FIX: the original returned None unconditionally, which made
        every caller's ``nagios_return = self._write_command(...)``
        success check evaluate false, so the enable_*/silence_* helpers
        always reported "Fail: could not write to the command file"
        even when the write succeeded.
        """
        try:
            fp = open(self.cmdfile, 'w')
            fp.write(cmd)
            fp.flush()
            fp.close()
            self.command_results.append(cmd.strip())
            return True
        except IOError:
            self.module.fail_json(msg='unable to write to nagios command file',
                                  cmdfile=self.cmdfile)

    def _fmt_dt_str(self, cmd, host, duration, author=None,
                    comment=None, start=None,
                    svc=None, fixed=1, trigger=0):
        """
        Format an external-command downtime string.

        cmd      - Nagios command ID
        host     - Host schedule downtime on
        duration - Minutes to schedule downtime for
        author   - Name to file the downtime as
        comment  - Reason for running this command (upgrade, reboot, etc)
        start    - Start of downtime in seconds since 12:00AM Jan 1 1970
                   Default is to use the entry time (now)
        svc      - Service to schedule downtime for, omit when for host downtime
        fixed    - Start now if 1, start when a problem is detected if 0
        trigger  - Optional ID of event to start downtime from. Leave as 0 for
                   fixed downtime.

        Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        entry_time = self._now()
        if start is None:
            start = entry_time

        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
        duration_s = (duration * 60)
        end = start + duration_s

        if not author:
            author = self.author

        if not comment:
            comment = self.comment

        if svc is not None:
            dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]
        else:
            # Downtime for a host if no svc specified
            dt_args = [str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]

        return hdr + ";".join(dt_args) + "\n"

    def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
        """
        Format an external-command downtime deletion string.

        cmd     - Nagios command ID
        host    - Host to remove scheduled downtime from
        comment - Reason downtime was added (upgrade, reboot, etc)
        start   - Start of downtime in seconds since 12:00AM Jan 1 1970
        svc     - Service to remove downtime for, omit to remove all downtime
                  for the host

        Syntax: [submitted] COMMAND;<host_name>;
        [<service_desription>];[<start_time>];[<comment>]
        """
        entry_time = self._now()
        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)

        if comment is None:
            comment = self.comment

        # Empty fields are still delimited so the command keeps its shape.
        dt_del_args = [
            svc if svc is not None else '',
            str(start) if start is not None else '',
            # comment is never None here (defaulted above), so the
            # original's 'if comment is not None' branch was dead code.
            comment,
        ]

        return hdr + ";".join(dt_del_args) + "\n"

    def _fmt_notif_str(self, cmd, host=None, svc=None):
        """
        Format an external-command notification string.

        cmd  - Nagios command ID.
        host - Host to en/disable notifications on. A value is not required
               for global downtime
        svc  - Service to schedule downtime for. A value is not required
               for host downtime.

        Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
        """
        entry_time = self._now()
        notif_str = "[%s] %s" % (entry_time, cmd)
        if host is not None:
            notif_str += ";%s" % host

        if svc is not None:
            notif_str += ";%s" % svc

        return notif_str + "\n"

    def schedule_svc_downtime(self, host, services=None, minutes=30):
        """
        Schedule downtime for particular services; Nagios will not send
        notifications about them during the downtime.

        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SVC_DOWNTIME"

        if services is None:
            services = []

        for service in services:
            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
            self._write_command(dt_cmd_str)

    def schedule_host_downtime(self, host, minutes=30):
        """
        Schedule downtime for a particular host; Nagios will not send
        notifications about it during the downtime.

        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_DOWNTIME"
        self._write_command(self._fmt_dt_str(cmd, host, minutes))

    def schedule_host_svc_downtime(self, host, minutes=30):
        """
        Schedule downtime for all services associated with a particular
        host.

        Syntax: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
        self._write_command(self._fmt_dt_str(cmd, host, minutes))

    def delete_host_downtime(self, host, services=None, comment=None):
        """
        Remove scheduled downtime for a particular host (or for the
        given services on it).

        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
        [<service_desription>];[<start_time>];[<comment>]
        """
        cmd = "DEL_DOWNTIME_BY_HOST_NAME"

        if services is None:
            self._write_command(self._fmt_dt_del_str(cmd, host, comment=comment))
        else:
            for service in services:
                self._write_command(
                    self._fmt_dt_del_str(cmd, host, svc=service, comment=comment))

    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
        """
        Schedule downtime for all hosts in a particular hostgroup.

        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
        self._write_command(self._fmt_dt_str(cmd, hostgroup, minutes))

    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
        """
        Schedule downtime for all services in a particular hostgroup.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
        self._write_command(self._fmt_dt_str(cmd, hostgroup, minutes))

    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
        """
        Schedule downtime for all hosts in a particular servicegroup.

        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
        self._write_command(self._fmt_dt_str(cmd, servicegroup, minutes))

    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
        """
        Schedule downtime for all services in a particular servicegroup.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
        self._write_command(self._fmt_dt_str(cmd, servicegroup, minutes))

    def disable_host_svc_notifications(self, host):
        """
        Prevent notifications for all services on the specified host
        (does not disable notifications for the host itself).

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, host))

    def disable_host_notifications(self, host):
        """
        Prevent notifications for the specified host (does not disable
        notifications for its services).

        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, host))

    def disable_svc_notifications(self, host, services=None):
        """
        Prevent notifications for the specified services (does not
        disable notifications for the host itself).

        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "DISABLE_SVC_NOTIFICATIONS"

        if services is None:
            services = []

        for service in services:
            self._write_command(self._fmt_notif_str(cmd, host, svc=service))

    def disable_servicegroup_host_notifications(self, servicegroup):
        """
        Prevent notifications for all hosts in the specified
        servicegroup (not for the services on those hosts).

        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, servicegroup))

    def disable_servicegroup_svc_notifications(self, servicegroup):
        """
        Prevent notifications for all services in the specified
        servicegroup (not for the hosts in it).

        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, servicegroup))

    def disable_hostgroup_host_notifications(self, hostgroup):
        """
        Disable notifications for all hosts in a particular hostgroup
        (see DISABLE_HOSTGROUP_SVC_NOTIFICATIONS for their services).

        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, hostgroup))

    def disable_hostgroup_svc_notifications(self, hostgroup):
        """
        Disable notifications for all services associated with hosts in
        a particular hostgroup (see DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
        for the hosts themselves).

        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, hostgroup))

    def enable_host_notifications(self, host):
        """
        Enable notifications for a particular host (not for its
        services).

        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_NOTIFICATIONS"
        self._write_command(self._fmt_notif_str(cmd, host))

    def enable_host_svc_notifications(self, host):
        """
        Enable notifications for all services on the specified host
        (not for the host itself).

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_svc_notifications(self, host, services=None):
        """
        Enable notifications for particular services (not for the
        host).

        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "ENABLE_SVC_NOTIFICATIONS"

        if services is None:
            services = []

        nagios_return = True
        return_str_list = []
        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_host_notifications(self, hostgroup):
        """
        Enable notifications for all hosts in a particular hostgroup
        (not for their services).

        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_svc_notifications(self, hostgroup):
        """
        Enable notifications for all services associated with hosts in
        a particular hostgroup (not for the hosts themselves).

        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_host_notifications(self, servicegroup):
        """
        Enable notifications for all hosts that have services that are
        members of a particular servicegroup (not for those services).

        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_svc_notifications(self, servicegroup):
        """
        Enable notifications for all services that are members of a
        particular servicegroup (not for the hosts).

        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def silence_host(self, host):
        """
        Prevent notifications for the host AND all services on it;
        equivalent to disable_host_svc_notifications plus
        disable_host_notifications.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "DISABLE_HOST_SVC_NOTIFICATIONS",
            "DISABLE_HOST_NOTIFICATIONS"
        ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def unsilence_host(self, host):
        """
        Enable notifications for the host AND all services on it;
        equivalent to enable_host_svc_notifications plus
        enable_host_notifications.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "ENABLE_HOST_SVC_NOTIFICATIONS",
            "ENABLE_HOST_NOTIFICATIONS"
        ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def silence_nagios(self):
        """
        Disable notifications for all hosts and services in nagios.

        This is a 'SHUT UP, NAGIOS' command
        """
        cmd = 'DISABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def unsilence_nagios(self):
        """
        Enable notifications for all hosts and services in nagios.

        This is an 'OK, NAGIOS, GO' command
        """
        cmd = 'ENABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def nagios_cmd(self, cmd):
        """
        Send an arbitrary command to nagios.

        Prepends the submitted time and appends a newline; the caller
        provides the properly formatted command body.
        """
        pre = '[%s]' % int(time.time())
        post = '\n'
        cmdstr = '%s %s%s' % (pre, cmd, post)
        self._write_command(cmdstr)

    def act(self):
        """
        Dispatch on self.action, run the corresponding command(s), and
        exit the module reporting every external command written.
        """
        # host or service downtime?
        if self.action == 'downtime':
            if self.services == 'host':
                self.schedule_host_downtime(self.host, self.minutes)
            elif self.services == 'all':
                self.schedule_host_svc_downtime(self.host, self.minutes)
            else:
                self.schedule_svc_downtime(self.host,
                                           services=self.services,
                                           minutes=self.minutes)

        elif self.action == 'delete_downtime':
            if self.services == 'host':
                self.delete_host_downtime(self.host)
            elif self.services == 'all':
                self.delete_host_downtime(self.host, comment='')
            else:
                self.delete_host_downtime(self.host, services=self.services)

        elif self.action == "servicegroup_host_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_host_downtime(
                    servicegroup=self.servicegroup, minutes=self.minutes)
        elif self.action == "servicegroup_service_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_svc_downtime(
                    servicegroup=self.servicegroup, minutes=self.minutes)

        # toggle the host AND service alerts
        elif self.action == 'silence':
            self.silence_host(self.host)

        elif self.action == 'unsilence':
            self.unsilence_host(self.host)

        # toggle host/svc alerts
        elif self.action == 'enable_alerts':
            if self.services == 'host':
                self.enable_host_notifications(self.host)
            elif self.services == 'all':
                self.enable_host_svc_notifications(self.host)
            else:
                self.enable_svc_notifications(self.host,
                                              services=self.services)

        elif self.action == 'disable_alerts':
            if self.services == 'host':
                self.disable_host_notifications(self.host)
            elif self.services == 'all':
                self.disable_host_svc_notifications(self.host)
            else:
                self.disable_svc_notifications(self.host,
                                               services=self.services)
        elif self.action == 'silence_nagios':
            self.silence_nagios()

        elif self.action == 'unsilence_nagios':
            self.unsilence_nagios()

        elif self.action == 'command':
            self.nagios_cmd(self.command)

        # wtf?
        else:
            self.module.fail_json(msg="unknown action specified: '%s'" %
                                  self.action)

        self.module.exit_json(nagios_commands=self.command_results,
                              changed=True)
+
+######################################################################
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/newrelic_deployment.py b/lib/ansible/modules/extras/monitoring/newrelic_deployment.py
new file mode 100644
index 0000000000..3d9bc6c0ec
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/newrelic_deployment.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+version_added: "1.2"
+author: "Matt Coddington (@mcodd)"
+short_description: Notify newrelic about app deployments
+description:
+ - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
+options:
+ token:
+ description:
+ - API token, to place in the x-api-key header.
+ required: true
+ app_name:
+ description:
+ - (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application
+ required: false
+ application_id:
+ description:
+ - (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM
+ required: false
+ changelog:
+ description:
+ - A list of changes for this deployment
+ required: false
+ description:
+ description:
+ - Text annotation for the deployment - notes for you
+ required: false
+ revision:
+ description:
+ - A revision number (e.g., git commit SHA)
+ required: false
+ user:
+ description:
+ - The name of the user/process that triggered this deployment
+ required: false
+ appname:
+ description:
+ - Name of the application
+ required: false
+ environment:
+ description:
+ - The environment for this deployment
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- newrelic_deployment: token=AAAAAA
+ app_name=myapp
+ user='ansible deployment'
+ revision=1.0
+'''
+
+import urllib
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+    """Notify New Relic of an application deployment.
+
+    Builds the deployment payload from module parameters and POSTs it to
+    the (v1) deployments endpoint with the API token in the x-api-key
+    header.  Exits via module.exit_json/fail_json; never returns.
+    """
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(required=True),
+            app_name=dict(required=False),
+            application_id=dict(required=False),
+            changelog=dict(required=False),
+            description=dict(required=False),
+            revision=dict(required=False),
+            user=dict(required=False),
+            appname=dict(required=False),
+            environment=dict(required=False),
+            validate_certs = dict(default='yes', type='bool'),
+        ),
+        required_one_of=[['app_name', 'application_id']],
+        supports_check_mode=True
+    )
+
+    # build list of params
+    params = {}
+    # app_name and application_id are mutually exclusive; exactly one must
+    # be supplied (required_one_of above guarantees at least one).
+    if module.params["app_name"] and module.params["application_id"]:
+        module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+    if module.params["app_name"]:
+        params["app_name"] = module.params["app_name"]
+    elif module.params["application_id"]:
+        params["application_id"] = module.params["application_id"]
+    else:
+        # NOTE(review): unreachable in practice — required_one_of already
+        # enforces that one of the two parameters is present.
+        module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+    # Only forward optional parameters the caller actually set.
+    for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
+        if module.params[item]:
+            params[item] = module.params[item]
+
+    # If we're in check mode, just exit pretending like we succeeded
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    # Send the data to NewRelic as a form-encoded POST body.
+    url = "https://rpm.newrelic.com/deployments.xml"
+    data = urllib.urlencode(params)
+    headers = {
+        'x-api-key': module.params["token"],
+    }
+    response, info = fetch_url(module, url, data=data, headers=headers)
+    # The API answers 200 or 201 on success.
+    if info['status'] in (200, 201):
+        module.exit_json(changed=True)
+    else:
+        module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
+
diff --git a/lib/ansible/modules/extras/monitoring/pagerduty.py b/lib/ansible/modules/extras/monitoring/pagerduty.py
new file mode 100644
index 0000000000..99a9be8a04
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/pagerduty.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+ - This module will let you create PagerDuty maintenance windows
+version_added: "1.2"
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns"
+ - "Bruce Pennypacker"
+requirements:
+ - PagerDuty API access
+options:
+ state:
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ default: null
+ choices: [ "running", "started", "ongoing", "absent" ]
+ aliases: []
+ name:
+ description:
+ - PagerDuty unique subdomain.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ user:
+ description:
+ - PagerDuty user ID.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ passwd:
+ description:
+ - PagerDuty user password.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ token:
+ description:
+ - A pagerduty token, generated on the pagerduty site. Can be used instead of
+ user/passwd combination.
+    required: false
+    default: null
+ choices: []
+ aliases: []
+ version_added: '1.8'
+ requester_id:
+ description:
+ - ID of user making the request. Only needed when using a token and creating a maintenance_window.
+    required: false
+    default: null
+ choices: []
+ aliases: []
+ version_added: '1.8'
+ service:
+ description:
+ - A comma separated list of PagerDuty service IDs.
+ required: false
+ default: null
+ choices: []
+ aliases: [ services ]
+ hours:
+ description:
+ - Length of maintenance window in hours.
+ required: false
+ default: 1
+ choices: []
+ aliases: []
+ minutes:
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ required: false
+ default: 0
+ choices: []
+ aliases: []
+ version_added: '1.8'
+ desc:
+ description:
+ - Short description of maintenance window.
+ required: false
+ default: Created by Ansible
+ choices: []
+ aliases: []
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+'''
+
+EXAMPLES='''
+# List ongoing maintenance windows using a user/passwd
+- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
+
+# List ongoing maintenance windows using a token
+- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
+
+# Create a 1 hour maintenance window for service FOO123, using a user/passwd
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=running
+ service=FOO123
+
+# Create a 5 minute maintenance window for service FOO123, using a token
+- pagerduty: name=companyabc
+ token=xxxxxxxxxxxxxx
+ hours=0
+ minutes=5
+ state=running
+ service=FOO123
+
+
+# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=running
+ service=FOO123
+ hours=4
+ desc=deployment
+ register: pd_window
+
+# Delete the previous maintenance window
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=absent
+ service={{ pd_window.result.maintenance_window.id }}
+'''
+
+import datetime
+import base64
+
+def auth_header(user, passwd, token):
+ if token:
+ return "Token token=%s" % token
+
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
+ return "Basic %s" % auth
+
+def ongoing(module, name, user, passwd, token):
+ url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
+ headers = {"Authorization": auth_header(user, passwd, token)}
+
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ try:
+ json_out = json.loads(response.read())
+ except:
+ json_out = ""
+
+ return False, json_out, False
+
+
+def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
+    """Create a maintenance window spanning hours+minutes from now (UTC).
+
+    Returns (rc, json_out, changed) where rc is always False and changed
+    is always True; hard failures exit via module.fail_json.
+    """
+    # Window runs from "now" until now + hours + minutes, formatted as
+    # ISO 8601 UTC timestamps as the API expects.
+    now = datetime.datetime.utcnow()
+    later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
+    start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+    end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
+    headers = {
+        'Authorization': auth_header(user, passwd, token),
+        'Content-Type' : 'application/json',
+    }
+    request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
+
+    # Token-based auth carries no user identity, so the API needs an
+    # explicit requester_id in that case.
+    if requester_id:
+        request_data['requester_id'] = requester_id
+    else:
+        if token:
+            module.fail_json(msg="requester_id is required when using a token")
+
+    data = json.dumps(request_data)
+    response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+    if info['status'] != 200:
+        module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+    # An unparsable response body is reported as an empty string.
+    try:
+        json_out = json.loads(response.read())
+    except:
+        json_out = ""
+
+    return False, json_out, True
+
+def absent(module, name, user, passwd, token, requester_id, service):
+    """Delete a maintenance window by ID.
+
+    NOTE: only the first entry of the service list (service[0]) is used
+    as the window ID; additional entries are silently ignored.  Returns
+    (rc, json_out, changed) with rc always False and changed always True.
+    """
+    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
+    headers = {
+        'Authorization': auth_header(user, passwd, token),
+        'Content-Type' : 'application/json',
+    }
+    request_data = {}
+
+    # Token-based auth carries no user identity, so the API needs an
+    # explicit requester_id in that case.
+    if requester_id:
+        request_data['requester_id'] = requester_id
+    else:
+        if token:
+            module.fail_json(msg="requester_id is required when using a token")
+
+    data = json.dumps(request_data)
+    response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
+    if info['status'] != 200:
+        module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+    # An unparsable response body is reported as an empty string.
+    try:
+        json_out = json.loads(response.read())
+    except:
+        json_out = ""
+
+    return False, json_out, True
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=True),
+ user=dict(required=False),
+ passwd=dict(required=False),
+ token=dict(required=False),
+ service=dict(required=False, type='list', aliases=["services"]),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False),
+ minutes=dict(default='0', required=False),
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs = dict(default='yes', type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ passwd = module.params['passwd']
+ token = module.params['token']
+ service = module.params['service']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ token = module.params['token']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ if not token and not (user or passwd):
+ module.fail_json(msg="neither user and passwd nor token specified")
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed=True
+
+ if state == "ongoing":
+ (rc, out, changed) = ongoing(module, name, user, passwd, token)
+
+ if state == "absent":
+ (rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/pagerduty_alert.py b/lib/ansible/modules/extras/monitoring/pagerduty_alert.py
new file mode 100644
index 0000000000..e2d127f015
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/pagerduty_alert.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+ - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
+version_added: "1.9"
+author:
+ - "Amanpreet Singh (@aps-sids)"
+requirements:
+ - PagerDuty API access
+options:
+ name:
+ description:
+ - PagerDuty unique subdomain.
+ required: true
+ service_key:
+ description:
+ - The GUID of one of your "Generic API" services.
+ - This is the "service key" listed on a Generic API's service detail page.
+ required: true
+ state:
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ description:
+ - The pagerduty API key (readonly access), generated on the pagerduty site.
+ required: true
+ desc:
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+# Trigger an incident with just the basic options
+- pagerduty_alert:
+    name=companyabc
+    service_key=xxx
+    api_key=yourapikey
+ state=triggered
+ desc="problem that led to this trigger"
+
+# Trigger an incident with more options
+- pagerduty_alert:
+ service_key=xxx
+ api_key=yourapikey
+ state=triggered
+ desc="problem that led to this trigger"
+ incident_key=somekey
+ client="Sample Monitoring Service"
+ client_url=http://service.example.com
+
+# Acknowledge an incident based on incident_key
+- pagerduty_alert:
+ service_key=xxx
+ api_key=yourapikey
+ state=acknowledged
+ incident_key=somekey
+ desc="some text for incident's log"
+
+# Resolve an incident based on incident_key
+- pagerduty_alert:
+ service_key=xxx
+ api_key=yourapikey
+ state=resolved
+ incident_key=somekey
+ desc="some text for incident's log"
+'''
+
+
+def check(module, name, state, service_key, api_key, incident_key=None):
+    """Look up the most recent matching incident and compare its status.
+
+    Returns (incident, changed) where changed is True when the incident's
+    current status differs from the desired state.
+    """
+    url = "https://%s.pagerduty.com/api/v1/incidents" % name
+    headers = {
+        "Content-type": "application/json",
+        "Authorization": "Token token=%s" % api_key
+    }
+
+    # sort_by newest-first so index [0] below is the latest incident.
+    data = {
+        "service_key": service_key,
+        "incident_key": incident_key,
+        "sort_by": "incident_number:desc"
+    }
+
+    response, info = fetch_url(module, url, method='get',
+                               headers=headers, data=json.dumps(data))
+
+    if info['status'] != 200:
+        # NOTE(review): the two string halves concatenate without a space
+        # ("status.Reason"); left as-is since doc-only changes may not
+        # alter runtime strings.
+        module.fail_json(msg="failed to check current incident status."
+                             "Reason: %s" % info['msg'])
+    # NOTE(review): raises IndexError if no incidents match — TODO confirm
+    # whether an empty result should be handled explicitly.
+    json_out = json.loads(response.read())["incidents"][0]
+
+    if state != json_out["status"]:
+        return json_out, True
+    return json_out, False
+
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
+
+def main():
+    """Entry point: trigger, acknowledge or resolve a PagerDuty incident.
+
+    Maps the declarative 'state' to the matching events-API event type,
+    checks the incident's current status, and only sends an event when a
+    change is actually needed (and not in check mode).
+    """
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            service_key=dict(required=True),
+            api_key=dict(required=True),
+            state=dict(required=True,
+                       choices=['triggered', 'acknowledged', 'resolved']),
+            client=dict(required=False, default=None),
+            client_url=dict(required=False, default=None),
+            desc=dict(required=False, default='Created via Ansible'),
+            incident_key=dict(required=False, default=None)
+        ),
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+    service_key = module.params['service_key']
+    api_key = module.params['api_key']
+    state = module.params['state']
+    client = module.params['client']
+    client_url = module.params['client_url']
+    desc = module.params['desc']
+    incident_key = module.params['incident_key']
+
+    # Translate declarative states to the imperative event types the
+    # events API expects.
+    state_event_dict = {
+        'triggered': 'trigger',
+        'acknowledged': 'acknowledge',
+        'resolved': 'resolve'
+    }
+
+    event_type = state_event_dict[state]
+
+    # Acknowledge/resolve must reference an existing incident.
+    if event_type != 'trigger' and incident_key is None:
+        module.fail_json(msg="incident_key is required for "
+                             "acknowledge or resolve events")
+
+    out, changed = check(module, name, state,
+                         service_key, api_key, incident_key)
+
+    # Only hit the events API when a change is needed and we're not in
+    # check mode.
+    if not module.check_mode and changed is True:
+        out = send_event(module, service_key, event_type, desc,
+                         incident_key, client, client_url)
+
+    module.exit_json(result=out, changed=changed)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/pingdom.py b/lib/ansible/modules/extras/monitoring/pingdom.py
new file mode 100644
index 0000000000..4346e8ca6f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/pingdom.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+ - This module will let you pause/unpause Pingdom alerts
+version_added: "1.2"
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+ state:
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ default: null
+        choices: [ "running", "paused", "started", "stopped" ]
+ aliases: []
+ checkid:
+ description:
+ - Pingdom ID of the check.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ uid:
+ description:
+ - Pingdom user ID.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ passwd:
+ description:
+ - Pingdom user password.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ key:
+ description:
+ - Pingdom API key.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+notes:
+ - This module does not yet have support to add/remove checks.
+'''
+
+EXAMPLES = '''
+# Pause the check with the ID of 12345.
+- pingdom: uid=example@example.com
+ passwd=password123
+ key=apipassword123
+ checkid=12345
+ state=paused
+
+# Unpause the check with the ID of 12345.
+- pingdom: uid=example@example.com
+ passwd=password123
+ key=apipassword123
+ checkid=12345
+ state=running
+'''
+
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except:
+ HAS_PINGDOM = False
+
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ #if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ #if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def main():
+    """Entry point: pause or unpause a Pingdom check.
+
+    'paused'/'stopped' map to pause(), 'running'/'started' to unpause().
+    Exits via module.exit_json/fail_json.
+    """
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+            checkid=dict(required=True),
+            uid=dict(required=True),
+            passwd=dict(required=True),
+            key=dict(required=True)
+        )
+    )
+
+    # The third-party pingdom library is imported at module load; fail
+    # with a clear message if it is missing.
+    if not HAS_PINGDOM:
+        module.fail_json(msg="Missing required pingdom module (check docs)")
+
+    checkid = module.params['checkid']
+    state = module.params['state']
+    uid = module.params['uid']
+    passwd = module.params['passwd']
+    key = module.params['key']
+
+    # The choices above guarantee exactly one of these branches runs, so
+    # rc/name/result are always bound below.
+    if (state == "paused" or state == "stopped"):
+        (rc, name, result) = pause(checkid, uid, passwd, key)
+
+    if (state == "running" or state == "started"):
+        (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+    if rc != 0:
+        module.fail_json(checkid=checkid, name=name, status=result)
+
+    module.exit_json(checkid=checkid, name=name, status=result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/rollbar_deployment.py b/lib/ansible/modules/extras/monitoring/rollbar_deployment.py
new file mode 100644
index 0000000000..060193b78a
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/rollbar_deployment.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+version_added: 1.6
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+'''
+
+EXAMPLES = '''
+- rollbar_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+                      revision=4.2
+                      rollbar_user='admin'
+ comment='Test Deploy'
+'''
+
+import urllib
+
+def main():
+    """Notify Rollbar about an app deployment.
+
+    Builds the deploy payload from module parameters and POSTs it
+    (form-encoded) to the configured Rollbar deploy endpoint.  Exits via
+    module.exit_json/fail_json; never returns.
+    """
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(required=True),
+            environment=dict(required=True),
+            revision=dict(required=True),
+            user=dict(required=False),
+            rollbar_user=dict(required=False),
+            comment=dict(required=False),
+            url=dict(
+                required=False,
+                default='https://api.rollbar.com/api/1/deploy/'
+            ),
+            validate_certs=dict(default='yes', type='bool'),
+        ),
+        supports_check_mode=True
+    )
+
+    # In check mode, report "would have notified" without any API call.
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    params = dict(
+        access_token=module.params['token'],
+        environment=module.params['environment'],
+        revision=module.params['revision']
+    )
+
+    # Optional fields are only forwarded when set; note the Ansible names
+    # map to different Rollbar API field names here.
+    if module.params['user']:
+        params['local_username'] = module.params['user']
+
+    if module.params['rollbar_user']:
+        params['rollbar_username'] = module.params['rollbar_user']
+
+    if module.params['comment']:
+        params['comment'] = module.params['comment']
+
+    url = module.params.get('url')
+
+    # Python 2 except syntax — this module targets the py2 Ansible runtime.
+    try:
+        data = urllib.urlencode(params)
+        response, info = fetch_url(module, url, data=data)
+    except Exception, e:
+        module.fail_json(msg='Unable to notify Rollbar: %s' % e)
+    else:
+        if info['status'] == 200:
+            module.exit_json(changed=True)
+        else:
+            module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/lib/ansible/modules/extras/monitoring/sensu_check.py b/lib/ansible/modules/extras/monitoring/sensu_check.py
new file mode 100644
index 0000000000..7cf3850966
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/sensu_check.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+version_added: 2.0
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+ - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
+ - they are simply specified for your convenience.
+options:
+ name:
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ required: false
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so
+ - you can get the original file back if you somehow clobbered it incorrectly.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ command:
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ required: true
+ handlers:
+ description:
+ - List of handlers to notify when the check fails
+ required: false
+ default: []
+ subscribers:
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ required: false
+ default: []
+ interval:
+ description:
+ - Check interval in seconds
+ required: false
+ default: null
+ timeout:
+ description:
+ - Timeout for the check
+ required: false
+ default: 10
+ handle:
+ description:
+ - Whether the check should be handled or not
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+ subdue_begin:
+ description:
+ - When to disable handling of check failures
+ required: false
+ default: null
+ subdue_end:
+ description:
+ - When to enable handling of check failures
+ required: false
+ default: null
+ dependencies:
+ description:
+ - Other checks this check depends on, if dependencies fail,
+ - handling of this check will be disabled
+ required: false
+ default: []
+ metric:
+ description:
+ - Whether the check is a metric
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+ occurrences:
+ description:
+ - Number of event occurrences before the handler should take action
+ required: false
+ default: 1
+ refresh:
+ description:
+ - Number of seconds handlers should wait before taking second action
+ required: false
+ default: null
+ aggregate:
+ description:
+ - Classifies the check as an aggregate check,
+ - making it available via the aggregate API
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ low_flap_threshold:
+ description:
      - The low threshold for flap detection
+ required: false
+ default: null
+ high_flap_threshold:
+ description:
      - The high threshold for flap detection
+ required: false
+ default: null
+ custom:
+ version_added: "2.1"
+ description:
+ - A hash/dictionary of custom parameters for mixing to the configuration.
+ - You can't rewrite others module parameters using this
+ required: false
+ default: {}
+ source:
+ version_added: "2.1"
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+ required: false
+ default: null
+requirements: [ ]
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: get cpu metrics
+ sensu_check: name=cpu_load
+ command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric=yes handlers=relay subscribers=common interval=60
+
+# Check whether nginx is running
+- name: check nginx process
+ sensu_check: name=nginx_running
+ command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
+ handlers=default subscribers=nginx interval=60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: check disk
+ sensu_check: name=check_disk_capacity state=absent
+'''
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
def sensu_check(module, path, name, state='present', backup=False):
    """Ensure the named check is present in (or absent from) the JSON file.

    :param module: AnsibleModule instance (used for fail_json/check_mode/backup_local)
    :param path: path to the sensu checks JSON file
    :param name: name of the check to manage
    :param state: 'present' or 'absent'
    :param backup: when True, back up the file before rewriting it
    :return: tuple (changed, reasons) where reasons is a list of strings
             explaining every modification that was (or would be) made
    """
    changed = False
    reasons = []

    stream = None
    try:
        try:
            stream = open(path, 'r')
            config = json.load(stream)
        # BUG FIX: 'except IOError, e' is Python-2-only syntax; and
        # 'e.errno is 2' relied on CPython small-int identity -- use ==.
        except IOError as e:
            if e.errno == 2:  # ENOENT: file not found, non-fatal
                if state == 'absent':
                    reasons.append('file did not exist and state is `absent\'')
                    return changed, reasons
                config = {}
            else:
                module.fail_json(msg=str(e))
        except ValueError:
            msg = '{path} contains invalid JSON'.format(path=path)
            module.fail_json(msg=msg)
    finally:
        if stream:
            stream.close()

    if 'checks' not in config:
        if state == 'absent':
            reasons.append('`checks\' section did not exist and state is `absent\'')
            return changed, reasons
        config['checks'] = {}
        changed = True
        reasons.append('`checks\' section did not exist')

    if state == 'absent':
        if name in config['checks']:
            del config['checks'][name]
            changed = True
            reasons.append('check was present and state is `absent\'')

    if state == 'present':
        if name not in config['checks']:
            check = {}
            config['checks'][name] = check
            changed = True
            reasons.append('check was absent and state is `present\'')
        else:
            check = config['checks'][name]

        # Options that map 1:1 onto keys of the check definition.
        simple_opts = ['command',
                       'handlers',
                       'subscribers',
                       'interval',
                       'timeout',
                       'handle',
                       'dependencies',
                       'standalone',
                       'publish',
                       'occurrences',
                       'refresh',
                       'aggregate',
                       'low_flap_threshold',
                       'high_flap_threshold',
                       'source',
                       ]
        # A param of None means "unmanaged": the key is removed if present.
        for opt in simple_opts:
            if module.params[opt] is not None:
                if opt not in check or check[opt] != module.params[opt]:
                    check[opt] = module.params[opt]
                    changed = True
                    reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
            else:
                if opt in check:
                    del check[opt]
                    changed = True
                    reasons.append('`{opt}\' was removed'.format(opt=opt))

        if module.params['custom']:
            # Custom params may not shadow the managed/special keys.
            custom_params = module.params['custom']
            overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
            if overwrited_fields:
                # BUG FIX: previous message had broken grammar
                # ("can't overwriting ... trying overwrite").
                msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwrited_fields))
                module.fail_json(msg=msg)

            for k, v in custom_params.items():
                if k in config['checks'][name]:
                    if not config['checks'][name][k] == v:
                        changed = True
                        reasons.append('`custom param {opt}\' was changed'.format(opt=k))
                else:
                    changed = True
                    reasons.append('`custom param {opt}\' was added'.format(opt=k))
                check[k] = v
            simple_opts += custom_params.keys()

        # Remove keys that are neither managed options nor known special
        # fields; they are considered obsolete custom params.
        for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
            changed = True
            reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
            del check[opt]

        # 'metric' is expressed through the special 'type' key.
        if module.params['metric']:
            if 'type' not in check or check['type'] != 'metric':
                check['type'] = 'metric'
                changed = True
                reasons.append('`type\' was not defined or not `metric\'')
        if not module.params['metric'] and 'type' in check:
            del check['type']
            changed = True
            reasons.append('`type\' was defined')

        # subdue_begin/subdue_end are managed together as the 'subdue' dict.
        if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
            subdue = {'begin': module.params['subdue_begin'],
                      'end': module.params['subdue_end'],
                      }
            if 'subdue' not in check or check['subdue'] != subdue:
                check['subdue'] = subdue
                changed = True
                reasons.append('`subdue\' did not exist or was different')
        else:
            if 'subdue' in check:
                del check['subdue']
                changed = True
                reasons.append('`subdue\' was removed')

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            try:
                stream = open(path, 'w')
                stream.write(json.dumps(config, indent=2) + '\n')
            except IOError as e:
                module.fail_json(msg=str(e))
        finally:
            if stream:
                stream.close()

    return changed, reasons
+
+
def main():
    """Entry point: parse the arguments and apply the requested check state."""
    arg_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default='/etc/sensu/conf.d/checks.json'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        backup=dict(type='bool', default='no'),
        command=dict(type='str'),
        handlers=dict(type='list'),
        subscribers=dict(type='list'),
        interval=dict(type='int'),
        timeout=dict(type='int'),
        handle=dict(type='bool'),
        subdue_begin=dict(type='str'),
        subdue_end=dict(type='str'),
        dependencies=dict(type='list'),
        metric=dict(type='bool', default='no'),
        standalone=dict(type='bool'),
        publish=dict(type='bool'),
        occurrences=dict(type='int'),
        refresh=dict(type='int'),
        aggregate=dict(type='bool'),
        low_flap_threshold=dict(type='int'),
        high_flap_threshold=dict(type='int'),
        custom=dict(type='dict'),
        source=dict(type='str'),
    )

    module = AnsibleModule(argument_spec=arg_spec,
                           required_together=[['subdue_begin', 'subdue_end']],
                           supports_check_mode=True)

    # 'command' is only mandatory when the check is being created/updated.
    if module.params['state'] != 'absent' and module.params['command'] is None:
        module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))

    changed, reasons = sensu_check(module,
                                   module.params['path'],
                                   module.params['name'],
                                   module.params['state'],
                                   module.params['backup'])

    module.exit_json(path=module.params['path'],
                     changed=changed,
                     msg='OK',
                     name=module.params['name'],
                     reasons=reasons)
+
from ansible.module_utils.basic import *

# Guard the entry point so importing the module has no side effects
# (consistent with sensu_subscription in this tree).
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/extras/monitoring/sensu_subscription.py b/lib/ansible/modules/extras/monitoring/sensu_subscription.py
new file mode 100644
index 0000000000..192b474ee4
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/sensu_subscription.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+version_added: 2.2
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ description:
+ - The name of the channel
+ required: true
+ state:
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ description:
+ - Path to the subscriptions json file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so you
+ - can get the original file back if you somehow clobbered it incorrectly.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann
+'''
+
+RETURN = '''
+reasons:
  description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: subscribe to nginx checks
+ sensu_subscription: name=nginx
+
+# Unsubscribe from the common checks channel
+- name: unsubscribe from common checks
+ sensu_subscription: name=common state=absent
+'''
+
+
def sensu_subscription(module, path, name, state='present', backup=False):
    """Add or remove a channel subscription in the sensu client JSON file.

    :param module: AnsibleModule instance (used for fail_json/check_mode/backup_local)
    :param path: path to the subscriptions JSON file
    :param name: channel to (un)subscribe
    :param state: 'present' or 'absent'
    :param backup: when True, back up the file before rewriting it
    :return: tuple (changed, reasons)
    """
    changed = False
    reasons = []

    try:
        import json
    except ImportError:
        import simplejson as json

    try:
        config = json.load(open(path))
    except IOError:
        e = get_exception()
        # BUG FIX: 'e.errno is 2' relied on CPython small-int identity; use ==.
        if e.errno == 2:  # ENOENT: file not found, non-fatal
            if state == 'absent':
                reasons.append('file did not exist and state is `absent\'')
                return changed, reasons
            config = {}
        else:
            module.fail_json(msg=str(e))
    except ValueError:
        msg = '{path} contains invalid JSON'.format(path=path)
        module.fail_json(msg=msg)

    if 'client' not in config:
        if state == 'absent':
            reasons.append('`client\' did not exist and state is `absent\'')
            return changed, reasons
        config['client'] = {}
        changed = True
        reasons.append('`client\' did not exist')

    if 'subscriptions' not in config['client']:
        if state == 'absent':
            reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
            # BUG FIX: previously returned only 'changed', which broke the
            # caller's '(changed, reasons)' tuple unpacking with a TypeError.
            return changed, reasons
        config['client']['subscriptions'] = []
        changed = True
        reasons.append('`client.subscriptions\' did not exist')

    if name not in config['client']['subscriptions']:
        if state == 'absent':
            reasons.append('channel subscription was absent')
            # BUG FIX: same tuple-return bug as above.
            return changed, reasons
        config['client']['subscriptions'].append(name)
        changed = True
        reasons.append('channel subscription was absent and state is `present\'')
    else:
        if state == 'absent':
            config['client']['subscriptions'].remove(name)
            changed = True
            reasons.append('channel subscription was present and state is `absent\'')

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            open(path, 'w').write(json.dumps(config, indent=2) + '\n')
        except IOError:
            e = get_exception()
            module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e)))

    return changed, reasons
+
+
def main():
    """Entry point: parse the arguments and apply the subscription state."""
    arg_spec = {'name': {'type': 'str', 'required': True},
                'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
                'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
                # BUG FIX: this dict literal previously contained two 'type'
                # keys ('str' then 'bool'); the first was silently discarded.
                'backup': {'type': 'bool', 'default': 'no'},
                }

    module = AnsibleModule(argument_spec=arg_spec,
                           supports_check_mode=True)

    path = module.params['path']
    name = module.params['name']
    state = module.params['state']
    backup = module.params['backup']

    changed, reasons = sensu_subscription(module, path, name, state, backup)

    module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/stackdriver.py b/lib/ansible/modules/extras/monitoring/stackdriver.py
new file mode 100644
index 0000000000..25af77ec26
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/stackdriver.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to stackdriver
+description:
+ - Send code deploy and annotation events to Stackdriver
+version_added: "1.6"
+author: "Ben Whaley (@bwhaley)"
+options:
+ key:
+ description:
+ - API key.
+ required: true
+ default: null
+ event:
+ description:
+ - The type of event to send, either annotation or deploy
+ choices: ['annotation', 'deploy']
+ required: false
+ default: null
+ revision_id:
+ description:
+ - The revision of the code that was deployed. Required for deploy events
+ required: false
+ default: null
+ deployed_by:
+ description:
+ - The person or robot responsible for deploying the code
+ required: false
+ default: "Ansible"
+ deployed_to:
+ description:
+ - "The environment code was deployed to. (ie: development, staging, production)"
+ required: false
+ default: null
+ repository:
+ description:
+ - The repository (or project) deployed
+ required: false
+ default: null
+ msg:
+ description:
      - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
+ required: false
+ default: null
+ annotated_by:
+ description:
+ - The person or robot who the annotation should be attributed to.
+ required: false
+ default: "Ansible"
+ level:
+ description:
      - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ required: false
+ default: 'INFO'
+ instance_id:
+ description:
+ - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
+ required: false
+ default: null
+ event_epoch:
+ description:
+ - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123
+
+- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
    """Send a deploy event to Stackdriver"""
    deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"

    # Mandatory fields first; optional ones only when they are truthy.
    params = {
        'revision_id': revision_id,
        'deployed_by': deployed_by,
    }
    if deployed_to:
        params['deployed_to'] = deployed_to
    if repository:
        params['repository'] = repository

    return do_send_request(module, deploy_api, params, key)
+
def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
    """Send an annotation event to Stackdriver"""
    annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"

    # 'message' is mandatory; the rest are included only when truthy.
    params = {'message': msg}
    for field, value in (('annotated_by', annotated_by),
                         ('level', level),
                         ('instance_id', instance_id),
                         ('event_epoch', event_epoch)):
        if value:
            params[field] = value

    return do_send_request(module, annotation_api, params, key)
+
def do_send_request(module, url, params, key):
    """POST *params* as JSON to *url*, authenticating with the API *key*.

    Fails the module on any non-200 response; returns None otherwise.
    """
    headers = {
        'Content-Type': 'application/json',
        'x-stackdriver-apikey': key,
    }
    response, info = fetch_url(module, url, headers=headers,
                               data=json.dumps(params), method='POST')
    if info['status'] != 200:
        module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Entry point: dispatch to the deploy or annotation event sender."""
    module = AnsibleModule(
        argument_spec=dict(
            key=dict(required=True),
            event=dict(required=True, choices=['deploy', 'annotation']),
            msg=dict(),
            revision_id=dict(),
            annotated_by=dict(default='Ansible'),
            level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
            instance_id=dict(),
            event_epoch=dict(),
            deployed_by=dict(default='Ansible'),
            deployed_to=dict(),
            repository=dict(),
        ),
        supports_check_mode=True
    )
    # NOTE(review): supports_check_mode is declared but check_mode is never
    # honored below; a check-mode run still posts the event -- confirm intent.

    key = module.params["key"]
    event = module.params["event"]

    # Annotation params
    msg = module.params["msg"]
    annotated_by = module.params["annotated_by"]
    level = module.params["level"]
    instance_id = module.params["instance_id"]
    event_epoch = module.params["event_epoch"]

    # Deploy params
    revision_id = module.params["revision_id"]
    deployed_by = module.params["deployed_by"]
    deployed_to = module.params["deployed_to"]
    repository = module.params["repository"]

    ##################################################################
    # deploy requires revision_id
    # annotation requires msg
    # We verify these manually
    ##################################################################

    if event == 'deploy':
        if not revision_id:
            module.fail_json(msg="revision_id required for deploy events")
        try:
            send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
        # BUG FIX: 'except Exception, e' is Python-2-only syntax; also the
        # error messages said "sent" instead of "send".
        except Exception as e:
            module.fail_json(msg="unable to send deploy event: %s" % e)

    if event == 'annotation':
        if not msg:
            module.fail_json(msg="msg required for annotation events")
        try:
            send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
        except Exception as e:
            module.fail_json(msg="unable to send annotation event: %s" % e)

    changed = True
    module.exit_json(changed=changed, deployed_by=deployed_by)
+
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

# Guard the entry point so importing the module has no side effects
# (consistent with sensu_subscription in this tree).
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/extras/monitoring/statusio_maintenance.py b/lib/ansible/modules/extras/monitoring/statusio_maintenance.py
new file mode 100644
index 0000000000..c2b93db5c9
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/statusio_maintenance.py
@@ -0,0 +1,480 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API url (http://docs.statusio.apiary.io/) to
+ capture API traffic
+ - Use start_date and start_time with minutes to set future maintenance window
+version_added: "2.2"
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+options:
+ title:
+ description:
+ - A descriptive title for the maintenance window
+ required: false
+ default: "A new maintenance window"
+ desc:
+ description:
+ - Message describing the maintenance window
+ required: false
+ default: "Created by Ansible"
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ required: false
+ default: "https://api.status.io"
+ components:
+ description:
+ - The given name of your component (server name)
+ required: false
+ aliases: ['component']
+ default: None
+ containers:
+ description:
+ - The given name of your container (data center)
+ required: false
+ aliases: ['container']
+ default: None
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ required: false
+ default: false
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ required: false
+ default: false
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ required: false
+ default: false
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ required: false
+ default: false
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ required: false
+ default: false
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ required: false
+ default: false
+ maintenance_id:
+ description:
+ - The maintenance id number when deleting a maintenance window
+ required: false
+ default: None
+ minutes:
+ description:
+ - The length of time in UTC that the maintenance will run \
+ (starting from playbook runtime)
+ required: false
+ default: 10
+ start_date:
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ required: false
+ default: None
+ start_time:
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Create a maintenance window for 10 minutes on server1.example.com, with
# automation to stop the maintenance.
+- statusio_maintenance:
+ title: "Router Upgrade from ansible"
+ desc: "Performing a Router Upgrade"
+ components: "server1.example.com"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ maintenance_notify_1_hr: true
+ automation: true
+
+# Create a maintenance window for 60 minutes on multiple hosts
+- name: "Create maintenance window for server1 and server2"
+ local_action:
+ module: statusio_maintenance
+ title: "Routine maintenance"
+ desc: "Some security updates"
+ components:
      - "server1.example.com"
+ - "server2.example.com"
+ minutes: "60"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ maintenance_notify_1_hr: true
+ automation: true
+
+# Create a future maintenance window for 24 hours to all hosts inside the
+# Primary Data Center
+- statusio_maintenance:
+ title: Data center downtime
+ desc: Performing a Upgrade to our data center
+ components: "Primary Data Center"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ start_date: "01/01/2016"
+ start_time: "12:00"
+ minutes: 1440
+
+# Delete a maintenance window
+- statusio_maintenance:
+ title: "Remove a maintenance window"
+ maintenance_id: "561f90faf74bc94a4700087b"
+ statuspage: "statuspage_id"
+ api_id: "api_id"
+ api_key: "api_key"
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+
+
def get_api_auth_headers(api_id, api_key, url, statuspage):
    """Validate API credentials against status.io.

    Performs a component-list request; a well-formed answer proves the
    api_id/api_key/statuspage combination works and yields the component
    inventory used by later lookups.

    :return: tuple (rc, auth_headers, auth_content, error) -- rc is 0 on
             success; on failure rc is 1 and error holds the message.
    """
    headers = {
        "x-api-id": api_id,
        "x-api-key": api_key,
        "Content-Type": "application/json"
    }

    try:
        response = open_url(
            url + "/v2/component/list/" + statuspage, headers=headers)
        data = json.loads(response.read())
        if data['status']['message'] == 'Authentication failed':
            return 1, None, None, "Authentication failed: " \
                                  "Check api_id/api_key and statuspage id."
        else:
            auth_headers = headers
            auth_content = data
    # BUG FIX: the previous bare 'except:' returned the undefined name 'e',
    # which raised a NameError and masked the real failure.
    except Exception as e:
        return 1, None, None, str(e)
    return 0, auth_headers, auth_content, None
+
+
def get_component_ids(auth_content, components):
    """Map component names to status.io component/container id pairs.

    Matching is case-insensitive.  Returns (rc, host_ids, missing):
    rc 0 with the id list on success, or rc 1 with the names that were
    not found in the API answer.
    """
    host_ids = []
    wanted = [c.lower() for c in components]
    for entry in auth_content["result"]:
        entry_name = entry['name'].lower()
        if entry_name not in wanted:
            continue
        host_ids.append({
            "component_id": entry["_id"],
            "container_id": entry["containers"][0]["_id"],
        })
        wanted.remove(entry_name)
    if wanted:
        # some requested components were not present in the API answer
        return 1, None, wanted
    return 0, host_ids, None
+
+
def get_container_ids(auth_content, containers):
    """Map container names to component/container id pairs.

    Case-insensitive twin of get_component_ids, keyed on the first
    container name of each component.  Returns (rc, host_ids, missing).
    """
    host_ids = []
    wanted = [c.lower() for c in containers]
    for entry in auth_content["result"]:
        entry_name = entry["containers"][0]["name"].lower()
        if entry_name not in wanted:
            continue
        host_ids.append({
            "component_id": entry["_id"],
            "container_id": entry["containers"][0]["_id"],
        })
        wanted.remove(entry_name)

    if wanted:
        # some requested containers were not present in the API answer
        return 1, None, wanted
    return 0, host_ids, None
+
+
def get_date_time(start_date, start_time, minutes):
    """Build [start_date, start_time, end_date, end_time] strings (UTC).

    When both start_date and start_time are given they are validated
    against '%m/%d/%Y' and '%H:%M'; otherwise the window starts now.
    The end is always start + *minutes*.  Returns (rc, dates, error):
    rc 0 with the four-element list, or rc 1 with an error message.
    """
    window = []
    if start_date and start_time:
        try:
            datetime.datetime.strptime(start_date, '%m/%d/%Y')
        except (NameError, ValueError):
            return 1, None, "Not a valid start_date format."
        window.append(start_date)
        try:
            datetime.datetime.strptime(start_time, '%H:%M')
        except (NameError, ValueError):
            return 1, None, "Not a valid start_time format."
        window.append(start_time)
        try:
            # Derive the end of the window from start + minutes.
            begin = datetime.datetime.strptime(
                start_time + start_date, '%H:%M%m/%d/%Y')
            finish = begin + datetime.timedelta(minutes=minutes)
            window.append(finish.strftime("%m/%d/%Y"))
            window.append(finish.strftime("%H:%M"))
        except (NameError, ValueError):
            return 1, None, "Couldn't work out a valid date"
    else:
        begin = datetime.datetime.utcnow()
        finish = begin + datetime.timedelta(minutes=minutes)
        window.extend([begin.strftime("%m/%d/%Y"),
                       begin.strftime("%H:%M"),
                       finish.strftime("%m/%d/%Y"),
                       finish.strftime("%H:%M")])
    return 0, window, None
+
+
def create_maintenance(auth_headers, url, statuspage, host_ids,
                       all_infrastructure_affected, automation, title, desc,
                       returned_date, maintenance_notify_now,
                       maintenance_notify_72_hr, maintenance_notify_24_hr,
                       maintenance_notify_1_hr):
    """Schedule a maintenance window via the status.io API.

    :param auth_headers: headers dict produced by get_api_auth_headers
    :param url: base API url
    :param statuspage: statuspage id
    :param host_ids: list of {'component_id': ..., 'container_id': ...}
        as produced by get_component_ids/get_container_ids
    :param returned_date: [start_date, start_time, end_date, end_time]
        as produced by get_date_time
    :return: tuple (rc, None, error) -- rc 0 on success, rc 1 with a
        message on failure
    """
    # Each date/time element is wrapped in a single-element list;
    # presumably the schedule endpoint expects list-valued date fields
    # -- TODO confirm against the status.io API docs.
    returned_dates = [[x] for x in returned_date]
    component_id = []
    container_id = []
    # Split the id pairs into the two parallel lists the API takes.
    for val in host_ids:
        component_id.append(val['component_id'])
        container_id.append(val['container_id'])
    try:
        # Boolean flags are serialized as "0"/"1" strings via str(int(...)).
        values = json.dumps({
            "statuspage_id": statuspage,
            "components": component_id,
            "containers": container_id,
            "all_infrastructure_affected":
                str(int(all_infrastructure_affected)),
            "automation": str(int(automation)),
            "maintenance_name": title,
            "maintenance_details": desc,
            "date_planned_start": returned_dates[0],
            "time_planned_start": returned_dates[1],
            "date_planned_end": returned_dates[2],
            "time_planned_end": returned_dates[3],
            "maintenance_notify_now": str(int(maintenance_notify_now)),
            "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
            "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
            "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
        })
        response = open_url(
            url + "/v2/maintenance/schedule", data=values,
            headers=auth_headers)
        data = json.loads(response.read())

        # The API signals failure in-band via status.error.
        if data["status"]["error"] == "yes":
            return 1, None, data["status"]["message"]
    except Exception:
        e = get_exception()
        return 1, None, str(e)
    return 0, None, None
+
+
def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
    """Delete a scheduled status.io maintenance window.

    Returns (rc, None, error): rc 0 on success, 1 with an error message when
    the API rejects the id or the request fails.
    """
    try:
        payload = json.dumps({
            "statuspage_id": statuspage,
            "maintenance_id": maintenance_id,
        })
        response = open_url(
            url=url + "/v2/maintenance/delete",
            data=payload,
            headers=auth_headers)
        result = json.loads(response.read())
        # In-band error flag; the API answers 200 even for bad ids.
        if result["status"]["error"] == "yes":
            return 1, None, "Invalid maintenance_id"
    except Exception:
        e = get_exception()
        return 1, None, str(e)
    return 0, None, None
+
+
def main():
    """Ansible entry point: schedule (state=present) or delete
    (state=absent) a status.io maintenance window."""
    module = AnsibleModule(
        argument_spec=dict(
            api_id=dict(required=True),
            api_key=dict(required=True, no_log=True),
            statuspage=dict(required=True),
            state=dict(required=False, default='present',
                       choices=['present', 'absent']),
            url=dict(default='https://api.status.io', required=False),
            components=dict(type='list', required=False, default=None,
                            aliases=['component']),
            containers=dict(type='list', required=False, default=None,
                            aliases=['container']),
            all_infrastructure_affected=dict(type='bool', default=False,
                                             required=False),
            automation=dict(type='bool', default=False, required=False),
            title=dict(required=False, default='A new maintenance window'),
            desc=dict(required=False, default='Created by Ansible'),
            minutes=dict(type='int', required=False, default=10),
            maintenance_notify_now=dict(type='bool', default=False,
                                        required=False),
            maintenance_notify_72_hr=dict(type='bool', default=False,
                                          required=False),
            maintenance_notify_24_hr=dict(type='bool', default=False,
                                          required=False),
            maintenance_notify_1_hr=dict(type='bool', default=False,
                                         required=False),
            maintenance_id=dict(required=False, default=None),
            start_date=dict(default=None, required=False),
            start_time=dict(default=None, required=False)
        ),
        supports_check_mode=True,
    )

    api_id = module.params['api_id']
    api_key = module.params['api_key']
    statuspage = module.params['statuspage']
    state = module.params['state']
    url = module.params['url']
    components = module.params['components']
    containers = module.params['containers']
    all_infrastructure_affected = module.params['all_infrastructure_affected']
    automation = module.params['automation']
    title = module.params['title']
    desc = module.params['desc']
    minutes = module.params['minutes']
    maintenance_notify_now = module.params['maintenance_notify_now']
    maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
    maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
    maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
    maintenance_id = module.params['maintenance_id']
    start_date = module.params['start_date']
    start_time = module.params['start_time']

    if state == "present":

        # Authenticate first; get_api_auth_headers (defined earlier in this
        # file) presumably returns (rc, headers, content, error) -- the
        # content carries the component/container listing used below.
        if api_id and api_key:
            (rc, auth_headers, auth_content, error) = \
                get_api_auth_headers(api_id, api_key, url, statuspage)
            if rc != 0:
                module.fail_json(msg="Failed to get auth keys: %s" % error)
        else:
            auth_headers = {}
            auth_content = {}

        # NOTE(review): parsed as "minutes or (start_time and start_date)";
        # since minutes defaults to 10 this condition is always true --
        # confirm whether the intent was "(minutes or start_time) and
        # start_date" or similar.
        if minutes or start_time and start_date:
            (rc, returned_date, error) = get_date_time(
                start_date, start_time, minutes)
            if rc != 0:
                module.fail_json(msg="Failed to set date/time: %s" % error)

        # Components and containers are mutually exclusive; exactly one
        # must be given.
        if not components and not containers:
            return module.fail_json(msg="A Component or Container must be "
                                        "defined")
        elif components and containers:
            return module.fail_json(msg="Components and containers cannot "
                                        "be used together")
        else:
            # Resolve human-readable names to API ids from the cached
            # auth_content listing.
            if components:
                (rc, host_ids, error) = get_component_ids(auth_content,
                                                          components)
                if rc != 0:
                    module.fail_json(msg="Failed to find component %s" % error)

            if containers:
                (rc, host_ids, error) = get_container_ids(auth_content,
                                                          containers)
                if rc != 0:
                    module.fail_json(msg="Failed to find container %s" % error)

            # In check mode report "would change" without hitting the API.
            if module.check_mode:
                module.exit_json(changed=True)
            else:
                (rc, _, error) = create_maintenance(
                    auth_headers, url, statuspage, host_ids,
                    all_infrastructure_affected, automation,
                    title, desc, returned_date, maintenance_notify_now,
                    maintenance_notify_72_hr, maintenance_notify_24_hr,
                    maintenance_notify_1_hr)
                if rc == 0:
                    module.exit_json(changed=True, result="Successfully created "
                                                          "maintenance")
                else:
                    module.fail_json(msg="Failed to create maintenance: %s"
                                         % error)

    if state == "absent":

        # Deletion only needs auth headers, not the component listing.
        if api_id and api_key:
            (rc, auth_headers, auth_content, error) = \
                get_api_auth_headers(api_id, api_key, url, statuspage)
            if rc != 0:
                module.fail_json(msg="Failed to get auth keys: %s" % error)
        else:
            auth_headers = {}

        if module.check_mode:
            module.exit_json(changed=True)
        else:
            (rc, _, error) = delete_maintenance(
                auth_headers, url, statuspage, maintenance_id)
            if rc == 0:
                module.exit_json(
                    changed=True,
                    result="Successfully deleted maintenance"
                )
            else:
                module.fail_json(
                    msg="Failed to delete maintenance: %s" % error)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/uptimerobot.py b/lib/ansible/modules/extras/monitoring/uptimerobot.py
new file mode 100644
index 0000000000..65d963cda6
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/uptimerobot.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+ - This module will let you start and pause Uptime Robot Monitoring
+author: "Nate Kingsley (@nate-kingsley)"
+version_added: "1.9"
+requirements:
+ - Valid Uptime Robot API Key
+options:
+ state:
+ description:
+ - Define whether or not the monitor should be running or paused.
+ required: true
+ default: null
+ choices: [ "started", "paused" ]
+ aliases: []
+ monitorid:
+ description:
+ - ID of the monitor to check.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ apikey:
+ description:
+ - Uptime Robot API key.
+ required: true
+ default: null
+ choices: []
+ aliases: []
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+# Pause the monitor with an ID of 12345.
+- uptimerobot: monitorid=12345
+ apikey=12345-1234512345
+ state=paused
+
+# Start the monitor with an ID of 12345.
+- uptimerobot: monitorid=12345
+ apikey=12345-1234512345
+ state=started
+
+'''
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import urllib
+import time
+
+API_BASE = "http://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
def checkID(module, params):
    """Query the Uptime Robot getMonitors endpoint and return the parsed
    JSON response (used to validate the monitor id / API key)."""
    query = urllib.urlencode(params)
    response, info = fetch_url(module, API_BASE + API_ACTIONS['status'] + query)
    payload = json.loads(response.read())
    response.close()
    return payload
+
+
def startMonitor(module, params):
    """Resume the monitor (monitorStatus=1) via editMonitor and return the
    API's 'stat' field ("ok" on success)."""
    params['monitorStatus'] = 1
    query = urllib.urlencode(params)
    response, info = fetch_url(module, API_BASE + API_ACTIONS['editMonitor'] + query)
    payload = json.loads(response.read())
    response.close()
    return payload['stat']
+
+
def pauseMonitor(module, params):
    """Pause the monitor (monitorStatus=0) via editMonitor and return the
    API's 'stat' field ("ok" on success)."""
    params['monitorStatus'] = 0
    query = urllib.urlencode(params)
    response, info = fetch_url(module, API_BASE + API_ACTIONS['editMonitor'] + query)
    payload = json.loads(response.read())
    response.close()
    return payload['stat']
+
+
def main():
    """Ansible entry point: pause or start an Uptime Robot monitor."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(required=True, choices=['started', 'paused']),
            apikey = dict(required=True),
            monitorid = dict(required=True)
        ),
        supports_check_mode=SUPPORTS_CHECK_MODE
    )

    # Query parameters shared by every API call; 'monitors' is read by
    # getMonitors and 'monitorID' by editMonitor -- both carry the same id.
    params = dict(
        apiKey=module.params['apikey'],
        monitors=module.params['monitorid'],
        monitorID=module.params['monitorid'],
        format=API_FORMAT,
        noJsonCallback=API_NOJSONCALLBACK
    )

    # Validate the monitor id / API key before attempting any state change.
    check_result = checkID(module, params)

    if check_result['stat'] != "ok":
        module.fail_json(
            msg="failed",
            result=check_result['message']
        )

    if module.params['state'] == 'started':
        monitor_result = startMonitor(module, params)
    else:
        monitor_result = pauseMonitor(module, params)

    # NOTE(review): exits changed-free metadata; the module always reports
    # success here even when the monitor was already in the target state.
    module.exit_json(
        msg="success",
        result=monitor_result
    )
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_group.py b/lib/ansible/modules/extras/monitoring/zabbix_group.py
new file mode 100644
index 0000000000..a19c49794f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: zabbix_group
+short_description: Zabbix host groups creates/deletes
+description:
+ - Create host groups if they do not exist.
+ - Delete existing host groups if they exist.
+version_added: "1.8"
+author:
+ - "(@cove)"
+ - "Tony Minfei Ding"
+ - "Harrison Gu (@harrisongu)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ C(url) is an alias for C(server_url).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ state:
+ description:
+ - Create or delete host group.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+ timeout:
+ description:
+ - The timeout of API request(seconds).
+ default: 10
+ host_groups:
+ description:
+ - List of host groups to create or delete.
+ required: true
+ aliases: [ "host_group" ]
+notes:
+ - Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
+'''
+
+EXAMPLES = '''
+# Base create host groups example
+- name: Create host groups
+ local_action:
+ module: zabbix_group
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ state: present
+ host_groups:
+ - Example group1
+ - Example group2
+
# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurrent updates
+- name: Create host groups
+ local_action:
+ module: zabbix_group
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ state: present
+ host_groups:
+ - Example group1
+ - Example group2
+ when: inventory_hostname==groups['group_name'][0]
+'''
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+ from zabbix_api import Already_Exists
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
class HostGroup(object):
    """Thin wrapper around the Zabbix 'hostgroup' API used by this module.

    All methods fail the Ansible module on API errors rather than raising.
    Fix vs. original: 'except Exception, e' (Python-2-only syntax) replaced
    with 'except Exception as e', valid on Python 2.6+ and required on 3.
    """

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # create host group(s) if not exists
    def create_host_group(self, group_names):
        """Create every group in group_names that does not already exist.

        Returns the list of group names actually created.  In check mode the
        module exits before any creation.
        """
        try:
            group_add_list = []
            for group_name in group_names:
                result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
                if not result:
                    try:
                        if self._module.check_mode:
                            self._module.exit_json(changed=True)
                        self._zapi.hostgroup.create({'name': group_name})
                        group_add_list.append(group_name)
                    except Already_Exists:
                        # Raced with a concurrent creation; report what was
                        # created so far (preserves original early-return).
                        return group_add_list
            return group_add_list
        except Exception as e:
            self._module.fail_json(msg="Failed to create host group(s): %s" % e)

    # delete host group(s)
    def delete_host_group(self, group_ids):
        """Delete the groups identified by group_ids (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.hostgroup.delete(group_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)

    # get group ids by name
    def get_group_ids(self, host_groups):
        """Return (group_ids, group_list) for the named groups that exist."""
        group_ids = []

        group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}})
        for group in group_list:
            group_id = group['groupid']
            group_ids.append(group_id)
        return group_ids, group_list
+
+
def main():
    """Ansible entry point: create (present) or delete (absent) Zabbix host groups."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str',required=False, default=None),
            http_login_password=dict(type='str',required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=True, aliases=['host_group']),
            state=dict(default="present", choices=['present','absent']),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        # NOTE(review): "requried" typo in this message -- fix separately
        # (doc-only change here).
        module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_groups = module.params['host_groups']
    state = module.params['state']
    timeout = module.params['timeout']

    zbx = None

    # login to zabbix
    try:
        # http_login_user/http_login_password cover HTTP basic auth; the
        # Zabbix credentials themselves are supplied via login() below.
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception, e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    hostGroup = HostGroup(module, zbx)

    group_ids = []
    group_list = []
    if host_groups:
        # Resolve already-existing groups up front: ids drive deletion,
        # the full records provide names for the result message.
        group_ids, group_list = hostGroup.get_group_ids(host_groups)

    if state == "absent":
        # delete host groups
        if group_ids:
            delete_group_names = []
            hostGroup.delete_host_group(group_ids)
            for group in group_list:
                delete_group_names.append(group['name'])
            module.exit_json(changed=True,
                             result="Successfully deleted host group(s): %s." % ",".join(delete_group_names))
        else:
            module.exit_json(changed=False, result="No host group(s) to delete.")
    else:
        # create host groups (only the missing ones are created)
        group_add_list = hostGroup.create_host_group(host_groups)
        if len(group_add_list) > 0:
            module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list)
        else:
            module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_host.py b/lib/ansible/modules/extras/monitoring/zabbix_host.py
new file mode 100644
index 0000000000..20d8b6e21f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_host.py
@@ -0,0 +1,562 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zabbix_host
+short_description: Zabbix host creates/updates/deletes
+description:
+ - This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
+version_added: "2.0"
+author:
+ - "(@cove)"
+ - "Tony Minfei Ding"
+ - "Harrison Gu (@harrisongu)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name, used to authenticate against the server.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ host_name:
+ description:
+ - Name of the host in Zabbix.
+ - host_name is the unique identifier used and cannot be updated using this module.
+ required: true
+ host_groups:
+ description:
+ - List of host groups the host is part of.
+ required: false
+ link_templates:
+ description:
+ - List of templates linked to the host.
+ required: false
+ default: None
+ inventory_mode:
+ description:
+ - Configure the inventory mode.
+ choices: ['automatic', 'manual', 'disabled']
+ required: false
+ default: None
+ version_added: '2.1'
+ status:
+ description:
+ - Monitoring status of the host.
+ required: false
+ choices: ['enabled', 'disabled']
+ default: "enabled"
+ state:
+ description:
+ - State of the host.
+ - On C(present), it will create if host does not exist or update the host if the associated data is different.
+ - On C(absent) will remove a host if it exists.
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ proxy:
+ description:
+ - The name of the Zabbix Proxy to be used
+ default: None
+ interfaces:
+ description:
+ - List of interfaces to be created for the host (see example below).
+ - 'Available values are: dns, ip, main, port, type and useip.'
+ - Please review the interface documentation for more information on the supported properties
+ - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
+ required: false
+ default: []
+ force:
+ description:
+ - Overwrite the host configuration, even if already present
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "2.0"
+'''
+
+EXAMPLES = '''
+- name: Create a new host or update an existing host's info
+ local_action:
+ module: zabbix_host
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+ host_groups:
+ - Example group1
+ - Example group2
+ link_templates:
+ - Example template1
+ - Example template2
+ status: enabled
+ state: present
+ inventory_mode: automatic
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.xx.xx.xx
+ dns: ""
+ port: 10050
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.xx.xx.xx
+ dns: ""
+ port: 12345
+ proxy: a.zabbix.proxy
+'''
+
+import logging
+import copy
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
# Since the zabbix-api python module is too old (version 1.0, no higher version so far),
+# it does not support the 'hostinterface' api calls,
+# so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
    """ZabbixAPI subclass adding the 'hostinterface' API prefix that
    zabbix-api 1.0 lacks."""

    # Replaced in __init__ with a ZabbixAPISubClass bound to the
    # 'hostinterface' prefix.
    hostinterface = None

    def __init__(self, server, timeout, user, passwd, **kwargs):
        # Explicit base-class call (ZabbixAPI may be an old-style class in
        # zabbix-api 1.0, so super() is avoided).
        ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
        self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
+
+
class Host(object):
    """Wrapper around the Zabbix API calls needed to manage a single host.

    All methods fail the Ansible module on API errors rather than raising.
    Fixes vs. original: 'except Exception, e' (Python-2-only syntax)
    replaced with 'except Exception as e' (valid on 2.6+, required on 3),
    and get_template_ids no longer shadows its own parameter.
    """

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # exist host
    def is_host_exist(self, host_name):
        """Return the (possibly empty) list of hosts matching host_name."""
        result = self._zapi.host.get({'filter': {'host': host_name}})
        return result

    # check if host group exists
    def check_host_group_exist(self, group_names):
        """Fail the module if any named host group is missing; else return True."""
        for group_name in group_names:
            result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True

    def get_template_ids(self, template_list):
        """Resolve template names to ids, failing the module on unknown names."""
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            # Use a distinct name for the lookup result instead of shadowing
            # the template_list parameter mid-iteration (original wart).
            matches = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(matches) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_ids.append(matches[0]['templateid'])
        return template_ids

    def add_host(self, host_name, group_ids, status, interfaces, proxy_id):
        """Create the host and return its new host id (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            host_list = self._zapi.host.create(parameters)
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))

    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id):
        """Update the host's groups/status/proxy and reconcile its interfaces.

        Desired interfaces are matched to existing ones by type: matches are
        updated in place, unmatched desired interfaces are created, and any
        existing interface left unmatched is deleted.
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            self._zapi.host.update(parameters)
            # NOTE: aliases (does not copy) the caller's list; matched
            # interfaces are removed from it below, as in the original.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = interface['type']
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update: reuse the existing interface id
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # add: no existing interface of this type
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
            # remove existing interfaces that matched nothing desired
            remove_interface_ids = []
            for remove_interface in interface_list_copy:
                remove_interface_ids.append(remove_interface['interfaceid'])
            if len(remove_interface_ids) > 0:
                self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))

    def delete_host(self, host_id, host_name):
        """Delete the host by id (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete([host_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))

    # get host by host name
    def get_host_by_host_name(self, host_name):
        """Return the full host object for host_name, failing if absent."""
        host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]

    # get proxyid by proxy name
    def get_proxyid_by_proxy_name(self, proxy_name):
        """Return the proxy id for proxy_name, failing if absent."""
        proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
        if len(proxy_list) < 1:
            self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
        else:
            return proxy_list[0]['proxyid']

    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        """Return [{'groupid': id}, ...] for group_names (the shape host.update expects)."""
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_ids.append({'groupid': group['groupid']})
        return group_ids

    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        """Return the ids of templates currently linked to the host."""
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids

    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        """Return the names of groups the host currently belongs to."""
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups

    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        """Return True when the desired interfaces differ from existing ones.

        Interfaces are first compared as sets of ports, then matched by port
        and compared property by property (string-compared, since the API
        returns strings).
        """
        interfaces_port_list = []
        if interfaces is not None:
            if len(interfaces) >= 1:
                for interface in interfaces:
                    interfaces_port_list.append(int(interface['port']))

        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))

        if set(interfaces_port_list) != set(exist_interface_ports):
            return True

        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    for key in interface.keys():
                        if str(exist_interface[key]) != str(interface[key]):
                            return True

        return False

    # get the status of host by host
    def get_host_status_by_host(self, host):
        """Return the host object's 'status' field."""
        return host['status']

    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host, proxy_id):
        """Return True when groups, status, interfaces, templates or proxy
        differ from the current server-side state."""
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True

        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True

        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True

        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True

        if proxy_id is not None:
            # NOTE(review): the API returns proxy_hostid as a string; if the
            # caller passes an int this comparison may always report a
            # difference -- confirm caller types before relying on it.
            if host['proxy_hostid'] != proxy_id:
                return True

        return False

    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list):
        """Link the host to exactly template_id_list; templates currently
        linked but not desired are cleared (unlinked and cleaned)."""
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)

        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)

        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)

    # Update the host inventory_mode
    def update_inventory_mode(self, host_id, inventory_mode):
        """Set the host inventory mode (automatic=1, manual=0, disabled=-1);
        no-op when inventory_mode is falsy."""
        # nothing was set, do nothing
        if not inventory_mode:
            return

        if inventory_mode == "automatic":
            inventory_mode = int(1)
        elif inventory_mode == "manual":
            inventory_mode = int(0)
        elif inventory_mode == "disabled":
            inventory_mode = int(-1)

        # watch for - https://support.zabbix.com/browse/ZBX-6033
        request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
+
def main():
    """Ansible entry point: create/update (state=present) or delete
    (state=absent) a Zabbix host, its groups, templates and interfaces."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            host_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=False),
            link_templates=dict(type='list', required=False),
            status=dict(default="enabled", choices=['enabled', 'disabled']),
            state=dict(default="present", choices=['present', 'absent']),
            inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
            timeout=dict(type='int', default=10),
            interfaces=dict(type='list', required=False),
            force=dict(type='bool', default=True),
            proxy=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        # NOTE(review): "requried" typo in this message -- fix separately
        # (doc-only change here).
        module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_name = module.params['host_name']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    inventory_mode = module.params['inventory_mode']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']
    force = module.params['force']
    proxy = module.params['proxy']

    # convert enabled to 0; disabled to 1
    status = 1 if status == "disabled" else 0

    zbx = None
    # login to zabbix
    try:
        # http_login_user/http_login_password cover HTTP basic auth; Zabbix
        # credentials are supplied via login() below.
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception, e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    host = Host(module, zbx)

    # Resolve template and group names to ids up front.
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)

    group_ids = []

    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)

    # Remember the agent interface's IP (type == 1) for the result message.
    ip = ""
    if interfaces:
        for interface in interfaces:
            if interface['type'] == 1:
                ip = interface['ip']

    # check if host exist
    is_host_exist = host.is_host_exist(host_name)

    if is_host_exist:
        # Use proxy specified, or set to None when updating host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = None

        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']

        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not group_ids:
                module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)

            if not force:
                # NOTE(review): fail_json normally expects msg=; passing
                # changed/result here is unusual -- confirm intended output.
                module.fail_json(changed=False, result="Host present, Can't update configuration without force")

            # get exist host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
            exist_interfaces_copy = copy.deepcopy(exist_interfaces)

            # update host
            interfaces_len = len(interfaces) if interfaces else 0

            # Two branches differ in update order (templates before vs.
            # after interface reconciliation) and in which interface
            # snapshot feeds the comparison (update_host mutates the list
            # it is given, hence the deepcopy above).
            if len(exist_interfaces) > interfaces_len:
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces, zabbix_host_obj, proxy_id):
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_host(host_name, group_ids, status, host_id,
                                     interfaces, exist_interfaces, proxy_id)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
            else:
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces_copy, zabbix_host_obj, proxy_id):
                    host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_inventory_mode(host_id, inventory_mode)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
    else:
        if state == "absent":
            # the host is already deleted.
            module.exit_json(changed=False)

        # Use proxy specified, or set to 0 when adding new host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = 0

        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)

        if not interfaces or (interfaces and len(interfaces) == 0):
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)

        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
        host.link_or_clear_template(host_id, template_ids)
        host.update_inventory_mode(host_id, inventory_mode)
        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
            host_name, ip, link_templates))
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py b/lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py
new file mode 100644
index 0000000000..c0e3f8c228
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_hostmacro.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: zabbix_hostmacro
+short_description: Zabbix host macro creates/updates/deletes
+description:
+ - manages Zabbix host macros, it can create, update or delete them.
+version_added: "2.0"
+author:
+ - "(@cave)"
+ - Dean Hailin Song
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ host_name:
+ description:
+ - Name of the host.
+ required: true
+ macro_name:
+ description:
+ - Name of the host macro.
+ required: true
+ macro_value:
+ description:
+ - Value of the host macro.
+ required: true
+ state:
+ description:
+ - State of the macro.
+ - On C(present), it will create if macro does not exist or update the macro if the associated data is different.
+ - On C(absent) will remove a macro if it exists.
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+'''
+
+EXAMPLES = '''
+- name: Create a new host macro or update an existing macro's value
+ local_action:
+ module: zabbix_hostmacro
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+    macro_name: Example macro
+    macro_value: Example value
+ state: present
+'''
+
+import logging
+import copy
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module too old (version 1.0, no higher version so far).
+class ZabbixAPIExtends(ZabbixAPI):
+    # Thin subclass kept for parity with the other zabbix_* modules;
+    # extra **kwargs are accepted but intentionally not used here.
+    def __init__(self, server, timeout, user, passwd, **kwargs):
+        ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
+
+
+class HostMacro(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+
+ # get host id by host name
+ def get_host_id(self, host_name):
+ try:
+ host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
+ if len(host_list) < 1:
+ self._module.fail_json(msg="Host not found: %s" % host_name)
+ else:
+ host_id = host_list[0]['hostid']
+ return host_id
+ except Exception, e:
+ self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))
+
+ # get host macro
+ def get_host_macro(self, macro_name, host_id):
+ try:
+ host_macro_list = self._zapi.usermacro.get(
+ {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
+ if len(host_macro_list) > 0:
+ return host_macro_list[0]
+ return None
+ except Exception, e:
+ self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))
+
+ # create host macro
+ def create_host_macro(self, macro_name, macro_value, host_id):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
+ self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
+ except Exception, e:
+ self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))
+
+ # update host macro
+ def update_host_macro(self, host_macro_obj, macro_name, macro_value):
+ host_macro_id = host_macro_obj['hostmacroid']
+ if host_macro_obj['macro'] == '{$'+macro_name+'}' and host_macro_obj['value'] == macro_value:
+ self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name)
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
+ self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
+ except Exception, e:
+ self._module.fail_json(msg="Failed to updated host macro %s: %s" % (macro_name, e))
+
+ # delete host macro
+ def delete_host_macro(self, host_macro_obj, macro_name):
+ host_macro_id = host_macro_obj['hostmacroid']
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.usermacro.delete([host_macro_id])
+ self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
+ except Exception, e:
+ self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ host_name=dict(type='str', required=True),
+ macro_name=dict(type='str', required=True),
+ macro_value=dict(type='str', required=True),
+ state=dict(default="present", choices=['present', 'absent']),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ host_name = module.params['host_name']
+ macro_name = (module.params['macro_name']).upper()
+ macro_value = module.params['macro_value']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+ except Exception, e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ host_macro_class_obj = HostMacro(module, zbx)
+
+ changed = False
+
+ if host_name:
+ host_id = host_macro_class_obj.get_host_id(host_name)
+ host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id)
+
+ if state == 'absent':
+ if not host_macro_obj:
+ module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name)
+ else:
+ # delete a macro
+ host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name)
+ else:
+ if not host_macro_obj:
+ # create host macro
+ host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id)
+ else:
+ # update host macro
+ host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
+
+from ansible.module_utils.basic import *
+main()
+
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_maintenance.py b/lib/ansible/modules/extras/monitoring/zabbix_maintenance.py
new file mode 100644
index 0000000000..89f792ce5d
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_maintenance.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+
+module: zabbix_maintenance
+short_description: Create Zabbix maintenance windows
+description:
+ - This module will let you create Zabbix maintenance windows.
+version_added: "1.8"
+author: "Alexander Bulimov (@abulimov)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ state:
+ description:
+ - Create or remove a maintenance window.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ C(url) is an alias for C(server_url).
+ required: true
+ default: null
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ host_names:
+ description:
+ - Hosts to manage maintenance window for.
+ Separate multiple hosts with commas.
+ C(host_name) is an alias for C(host_names).
+ B(Required) option when C(state) is I(present)
+ and no C(host_groups) specified.
+ required: false
+ default: null
+ aliases: [ "host_name" ]
+ host_groups:
+ description:
+ - Host groups to manage maintenance window for.
+ Separate multiple groups with commas.
+ C(host_group) is an alias for C(host_groups).
+ B(Required) option when C(state) is I(present)
+ and no C(host_names) specified.
+ required: false
+ default: null
+ aliases: [ "host_group" ]
+ minutes:
+ description:
+ - Length of maintenance window in minutes.
+ required: false
+ default: 10
+ name:
+ description:
+ - Unique name of maintenance window.
+ required: true
+ desc:
+ description:
+ - Short description of maintenance window.
+    required: false
+ default: Created by Ansible
+ collect_data:
+ description:
+ - Type of maintenance. With data collection, or without.
+ required: false
+ default: "true"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ version_added: "2.1"
+ required: false
+notes:
+ - Useful for setting hosts in maintenance mode before big update,
+ and removing maintenance window after update.
+ - Module creates maintenance window from now() to now() + minutes,
+ so if Zabbix server's time and host's time are not synchronized,
+ you will get strange results.
+ - Install required module with 'pip install zabbix-api' command.
+  - Checks existence only by maintenance name.
+'''
+
+EXAMPLES = '''
+# Create maintenance window named "Update of www1"
+# for host www1.example.com for 90 minutes
+- zabbix_maintenance: name="Update of www1"
+ host_name=www1.example.com
+ state=present
+ minutes=90
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+
+# Create maintenance window named "Mass update"
+# for host www1.example.com and host groups Office and Dev
+- zabbix_maintenance: name="Mass update"
+ host_name=www1.example.com
+ host_groups=Office,Dev
+ state=present
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+
+# Create maintenance window named "update"
+# for hosts www1.example.com and db1.example.com and without data collection.
+- zabbix_maintenance: name=update
+ host_names=www1.example.com,db1.example.com
+ state=present
+ collect_data=false
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+
+# Remove maintenance window named "Test1"
+- zabbix_maintenance: name=Test1
+ state=absent
+ server_url=https://monitoring.example.com
+ login_user=ansible
+ login_password=pAsSwOrD
+'''
+
+import datetime
+import time
+
+try:
+ from zabbix_api import ZabbixAPI
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc):
+ end_time = start_time + period
+ try:
+ zbx.maintenance.create(
+ {
+ "groupids": group_ids,
+ "hostids": host_ids,
+ "name": name,
+ "maintenance_type": maintenance_type,
+ "active_since": str(start_time),
+ "active_till": str(end_time),
+ "description": desc,
+ "timeperiods": [{
+ "timeperiod_type": "0",
+ "start_date": str(start_time),
+ "period": str(period),
+ }]
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def get_maintenance_id(zbx, name):
+ try:
+ result = zbx.maintenance.get(
+ {
+ "filter":
+ {
+ "name": name,
+ }
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+
+ maintenance_ids = []
+ for res in result:
+ maintenance_ids.append(res["maintenanceid"])
+
+ return 0, maintenance_ids, None
+
+
+def delete_maintenance(zbx, maintenance_id):
+ try:
+ zbx.maintenance.delete(maintenance_id)
+ except BaseException as e:
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def get_group_ids(zbx, host_groups):
+ group_ids = []
+ for group in host_groups:
+ try:
+ result = zbx.hostgroup.get(
+ {
+ "output": "extend",
+ "filter":
+ {
+ "name": group
+ }
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+
+ if not result:
+ return 1, None, "Group id for group %s not found" % group
+
+ group_ids.append(result[0]["groupid"])
+
+ return 0, group_ids, None
+
+
+def get_host_ids(zbx, host_names):
+ host_ids = []
+ for host in host_names:
+ try:
+ result = zbx.host.get(
+ {
+ "output": "extend",
+ "filter":
+ {
+ "name": host
+ }
+ }
+ )
+ except BaseException as e:
+ return 1, None, str(e)
+
+ if not result:
+ return 1, None, "Host id for host %s not found" % host
+
+ host_ids.append(result[0]["hostid"])
+
+ return 0, host_ids, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent']),
+ server_url=dict(type='str', required=True, default=None, aliases=['url']),
+ host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
+ minutes=dict(type='int', required=False, default=10),
+ host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ name=dict(type='str', required=True),
+ desc=dict(type='str', required=False, default="Created by Ansible"),
+ collect_data=dict(type='bool', required=False, default=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ host_names = module.params['host_names']
+ host_groups = module.params['host_groups']
+ state = module.params['state']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ minutes = module.params['minutes']
+ name = module.params['name']
+ desc = module.params['desc']
+ server_url = module.params['server_url']
+ collect_data = module.params['collect_data']
+ timeout = module.params['timeout']
+
+ if collect_data:
+ maintenance_type = 0
+ else:
+ maintenance_type = 1
+
+ try:
+ zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+ except BaseException as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ changed = False
+
+ if state == "present":
+
+ now = datetime.datetime.now()
+ start_time = time.mktime(now.timetuple())
+ period = 60 * int(minutes) # N * 60 seconds
+
+ if host_groups:
+ (rc, group_ids, error) = get_group_ids(zbx, host_groups)
+ if rc != 0:
+ module.fail_json(msg="Failed to get group_ids: %s" % error)
+ else:
+ group_ids = []
+
+ if host_names:
+ (rc, host_ids, error) = get_host_ids(zbx, host_names)
+ if rc != 0:
+ module.fail_json(msg="Failed to get host_ids: %s" % error)
+ else:
+ host_ids = []
+
+ (rc, maintenance, error) = get_maintenance_id(zbx, name)
+ if rc != 0:
+ module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+
+ if not maintenance:
+ if not host_names and not host_groups:
+ module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")
+
+ if module.check_mode:
+ changed = True
+ else:
+ (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s" % error)
+
+ if state == "absent":
+
+ (rc, maintenance, error) = get_maintenance_id(zbx, name)
+ if rc != 0:
+ module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+
+ if maintenance:
+ if module.check_mode:
+ changed = True
+ else:
+ (rc, _, error) = delete_maintenance(zbx, maintenance)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Failed to remove maintenance: %s" % error)
+
+ module.exit_json(changed=changed)
+
+from ansible.module_utils.basic import *
+main()
diff --git a/lib/ansible/modules/extras/monitoring/zabbix_screen.py b/lib/ansible/modules/extras/monitoring/zabbix_screen.py
new file mode 100644
index 0000000000..ffdcb21b5f
--- /dev/null
+++ b/lib/ansible/modules/extras/monitoring/zabbix_screen.py
@@ -0,0 +1,435 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+DOCUMENTATION = '''
+---
+module: zabbix_screen
+short_description: Zabbix screen creates/updates/deletes
+description:
+ - This module allows you to create, modify and delete Zabbix screens and associated graph data.
+version_added: "2.0"
+author:
+ - "(@cove)"
+ - "Tony Minfei Ding"
+ - "Harrison Gu (@harrisongu)"
+requirements:
+ - "python >= 2.6"
+ - zabbix-api
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https).
+ required: true
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ screens:
+ description:
+ - List of screens to be created/updated/deleted(see example).
+ - If the screen(s) already been added, the screen(s) name won't be updated.
+ - When creating or updating screen(s), C(screen_name), C(host_group) are required.
+ - When deleting screen(s), the C(screen_name) is required.
+ - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.'
+ required: true
+notes:
+ - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
+'''
+
+EXAMPLES = '''
+# Create/update a screen.
+- name: Create a new screen or update an existing screen's items
+ local_action:
+ module: zabbix_screen
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ screens:
+ - screen_name: ExampleScreen1
+ host_group: Example group1
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+
+# Create/update multi-screen
+- name: Create two of new screens or update the existing screens' items
+ local_action:
+ module: zabbix_screen
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ screens:
+ - screen_name: ExampleScreen1
+ host_group: Example group1
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+ - screen_name: ExampleScreen2
+ host_group: Example group2
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+
+# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
+- name: Create a new screen or update an existing screen's items
+ local_action:
+ module: zabbix_screen
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ state: present
+ screens:
+ - screen_name: ExampleScreen
+ host_group: Example group
+ state: present
+ graph_names:
+ - Example graph1
+ - Example graph2
+ graph_width: 200
+ graph_height: 100
+ when: inventory_hostname==groups['group_name'][0]
+'''
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+ from zabbix_api import ZabbixAPIException
+ from zabbix_api import Already_Exists
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call,
+# we have to inherit the ZabbixAPI class to add 'screenitem' support.
+class ZabbixAPIExtends(ZabbixAPI):
+    # Adds the 'screenitem' sub-API that zabbix-api 1.0 lacks; any extra
+    # **kwargs are merged into the ZabbixAPISubClass prefix dict.
+    screenitem = None
+
+    def __init__(self, server, timeout, user, passwd, **kwargs):
+        ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
+        self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
+
+
+class Screen(object):
+    """Helper around the Zabbix screen/screenitem API calls used by main().
+
+    Errors are reported through module.fail_json; check-mode short-circuits
+    the mutating calls with exit_json(changed=True).
+    """
+
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+
+    # get group id by group name
+    def get_host_group_id(self, group_name):
+        if group_name == "":
+            self._module.fail_json(msg="group_name is required")
+        hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
+        if len(hostGroup_list) < 1:
+            self._module.fail_json(msg="Host group not found: %s" % group_name)
+        else:
+            hostGroup_id = hostGroup_list[0]['groupid']
+            return hostGroup_id
+
+    # get monitored host_id by host_group_id
+    def get_host_ids_by_group_id(self, group_id):
+        host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
+        if len(host_list) < 1:
+            self._module.fail_json(msg="No host in the group.")
+        else:
+            host_ids = []
+            for i in host_list:
+                host_id = i['hostid']
+                host_ids.append(host_id)
+            return host_ids
+
+    # get screen id by name; returns None when the screen does not exist
+    def get_screen_id(self, screen_name):
+        if screen_name == "":
+            self._module.fail_json(msg="screen_name is required")
+        try:
+            screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
+            if len(screen_id_list) >= 1:
+                screen_id = screen_id_list[0]['screenid']
+                return screen_id
+            return None
+        except Exception as e:
+            self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
+
+    # create screen and return its new screenid
+    def create_screen(self, screen_name, h_size, v_size):
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
+            return screen['screenids'][0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
+
+    # update screen dimensions (name is only used for error reporting)
+    def update_screen(self, screen_id, screen_name, h_size, v_size):
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
+        except Exception as e:
+            self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
+
+    # delete screen
+    def delete_screen(self, screen_id, screen_name):
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.screen.delete([screen_id])
+        except Exception as e:
+            self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
+
+    # get all graph ids across hosts; vsize tracks the largest per-host
+    # graph count, used later as the screen's row count
+    def get_graph_ids(self, hosts, graph_name_list):
+        graph_id_lists = []
+        vsize = 1
+        for host in hosts:
+            graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
+            size = len(graph_id_list)
+            if size > 0:
+                graph_id_lists.extend(graph_id_list)
+                if vsize < size:
+                    vsize = size
+        return graph_id_lists, vsize
+
+    # get graph ids matching the given graph names on one host
+    def get_graphs_by_host_id(self, graph_name_list, host_id):
+        graph_ids = []
+        for graph_name in graph_name_list:
+            graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
+            graph_id_list = []
+            if len(graphs_list) > 0:
+                for graph in graphs_list:
+                    graph_id = graph['graphid']
+                    graph_id_list.append(graph_id)
+                if len(graph_id_list) > 0:
+                    graph_ids.extend(graph_id_list)
+        return graph_ids
+
+    # get screen items
+    def get_screen_items(self, screen_id):
+        screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
+        return screen_item_list
+
+    # delete screen items; returns True on deletion, False when nothing was
+    # deleted. NOTE(review): a ZabbixAPIException is swallowed and the
+    # method then implicitly returns None, which callers treat as falsy.
+    def delete_screen_items(self, screen_id, screen_item_id_list):
+        try:
+            if len(screen_item_id_list) == 0:
+                return True
+            screen_item_list = self.get_screen_items(screen_id)
+            if len(screen_item_list) > 0:
+                if self._module.check_mode:
+                    self._module.exit_json(changed=True)
+                self._zapi.screenitem.delete(screen_item_id_list)
+                return True
+            return False
+        except ZabbixAPIException:
+            pass
+
+    # get screen's hsize and vsize (columns come from the host count,
+    # except for the single-host layout handled below)
+    def get_hsize_vsize(self, hosts, v_size):
+        h_size = len(hosts)
+        if h_size == 1:
+            if v_size == 1:
+                h_size = 1
+            elif v_size in range(2, 9):
+                h_size = 2
+            else:
+                h_size = 3
+            # assumes Python 2 integer division (ceil of v_size / h_size);
+            # under Python 3 this would yield a float -- TODO confirm target
+            v_size = (v_size - 1) / h_size + 1
+        return h_size, v_size
+
+    # create screen_items
+    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
+        if len(hosts) < 4:
+            if width is None or width < 0:
+                width = 500
+        else:
+            if width is None or width < 0:
+                width = 200
+        if height is None or height < 0:
+            height = 100
+
+        try:
+            # when there're only one host, only one row is not good.
+            if len(hosts) == 1:
+                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
+                for i, graph_id in enumerate(graph_id_list):
+                    if graph_id is not None:
+                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+                                                      'width': width, 'height': height,
+                                                      'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1,
+                                                      'elements': 0, 'valign': 0, 'halign': 0,
+                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+            else:
+                # multi-host layout: one column per host, one row per graph
+                for i, host in enumerate(hosts):
+                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
+                    for j, graph_id in enumerate(graph_id_list):
+                        if graph_id is not None:
+                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+                                                          'width': width, 'height': height,
+                                                          'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
+                                                          'elements': 0, 'valign': 0, 'halign': 0,
+                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+        except Already_Exists:
+            # item already present on the screen -- treated as success
+            pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ timeout=dict(type='int', default=10),
+ screens=dict(type='list', required=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ timeout = module.params['timeout']
+ screens = module.params['screens']
+
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
+ zbx.login(login_user, login_password)
+ except Exception, e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ screen = Screen(module, zbx)
+ created_screens = []
+ changed_screens = []
+ deleted_screens = []
+
+ for zabbix_screen in screens:
+ screen_name = zabbix_screen['screen_name']
+ screen_id = screen.get_screen_id(screen_name)
+ state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
+
+ if state == "absent":
+ if screen_id:
+ screen_item_list = screen.get_screen_items(screen_id)
+ screen_item_id_list = []
+ for screen_item in screen_item_list:
+ screen_item_id = screen_item['screenitemid']
+ screen_item_id_list.append(screen_item_id)
+ screen.delete_screen_items(screen_id, screen_item_id_list)
+ screen.delete_screen(screen_id, screen_name)
+
+ deleted_screens.append(screen_name)
+ else:
+ host_group = zabbix_screen['host_group']
+ graph_names = zabbix_screen['graph_names']
+ graph_width = None
+ if 'graph_width' in zabbix_screen:
+ graph_width = zabbix_screen['graph_width']
+ graph_height = None
+ if 'graph_height' in zabbix_screen:
+ graph_height = zabbix_screen['graph_height']
+ host_group_id = screen.get_host_group_id(host_group)
+ hosts = screen.get_host_ids_by_group_id(host_group_id)
+
+ screen_item_id_list = []
+ resource_id_list = []
+
+ graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
+ h_size, v_size = screen.get_hsize_vsize(hosts, v_size)
+
+ if not screen_id:
+ # create screen
+ screen_id = screen.create_screen(screen_name, h_size, v_size)
+ screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
+ created_screens.append(screen_name)
+ else:
+ screen_item_list = screen.get_screen_items(screen_id)
+
+ for screen_item in screen_item_list:
+ screen_item_id = screen_item['screenitemid']
+ resource_id = screen_item['resourceid']
+ screen_item_id_list.append(screen_item_id)
+ resource_id_list.append(resource_id)
+
+ # when the screen items changed, then update
+ if graph_ids != resource_id_list:
+ deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
+ if deleted:
+ screen.update_screen(screen_id, screen_name, h_size, v_size)
+ screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
+ changed_screens.append(screen_name)
+
+ if created_screens and changed_screens:
+ module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
+ elif created_screens:
+ module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
+ elif changed_screens:
+ module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
+ elif deleted_screens:
+ module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
+ else:
+ module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()