diff --git a/Vagrantfile b/Vagrantfile index 988c69527..46eebf2cb 100755 --- a/Vagrantfile +++ b/Vagrantfile @@ -24,7 +24,7 @@ Vagrant.configure(2) do |config| echo "tsflags=nodocs" | tee -a /etc/yum.conf yum -y install epel-release #sed -i 's/^mirrorlist/#mirrorlist/; s/^#baseurl/baseurl/' /etc/yum.repos.d/{CentOS-Base.repo,epel.repo} - yum -y install https://packagecloud.io/rocknsm/2_2/packages/el/7/rock-release-2.2.0-2.noarch.rpm/download.rpm + yum -y install https://packagecloud.io/rocknsm/2_3/packages/el/7/rock-release-2.3-1.noarch.rpm/download.rpm yum -y update yum -y install ansible vim git tmux tito # Create virtual interface diff --git a/ansible.cfg b/ansible.cfg deleted file mode 100644 index 1c601df6c..000000000 --- a/ansible.cfg +++ /dev/null @@ -1,401 +0,0 @@ -# config file for ansible -- https://ansible.com/ -# =============================================== - -# nearly all parameters can be overridden in ansible-playbook -# or with command line flags. ansible will read ANSIBLE_CONFIG, -# ansible.cfg in the current working directory, .ansible.cfg in -# the home directory or /etc/ansible/ansible.cfg, whichever it -# finds first - -[defaults] - -# some basic default values... - -inventory = playbooks/inventory/all-in-one.ini -#library = /usr/share/my_modules/ -#module_utils = /usr/share/my_module_utils/ -#remote_tmp = ~/.ansible/tmp -#local_tmp = ~/.ansible/tmp -#forks = 5 -#poll_interval = 15 -#sudo_user = root -#ask_sudo_pass = True -#ask_pass = True -#transport = smart -#remote_port = 22 -#module_lang = C -#module_set_locale = False - -# plays will gather facts by default, which contain information about -# the remote system. 
-# -# smart - gather by default, but don't regather if already gathered -# implicit - gather by default, turn off with gather_facts: False -# explicit - do not gather by default, must say gather_facts: True -#gathering = implicit - -# This only affects the gathering done by a play's gather_facts directive, -# by default gathering retrieves all facts subsets -# all - gather all subsets -# network - gather min and network facts -# hardware - gather hardware facts (longest facts to retrieve) -# virtual - gather min and virtual facts -# facter - import facts from facter -# ohai - import facts from ohai -# You can combine them using comma (ex: network,virtual) -# You can negate them using ! (ex: !hardware,!facter,!ohai) -# A minimal set of facts is always gathered. -#gather_subset = all - -# some hardware related facts are collected -# with a maximum timeout of 10 seconds. This -# option lets you increase or decrease that -# timeout to something more suitable for the -# environment. -# gather_timeout = 10 - -# additional paths to search for roles in, colon separated -roles_path = playbooks/roles - -# uncomment this to disable SSH key host checking -#host_key_checking = False - -# change the default callback -#stdout_callback = skippy -# enable additional callbacks -#callback_whitelist = timer, mail - -# Determine whether includes in tasks and handlers are "static" by -# default. As of 2.0, includes are dynamic by default. Setting these -# values to True will make includes behave more like they did in the -# 1.x versions. 
-#task_includes_static = True -#handler_includes_static = True - -# Controls if a missing handler for a notification event is an error or a warning -#error_on_missing_handler = True - -# change this for alternative sudo implementations -#sudo_exe = sudo - -# What flags to pass to sudo -# WARNING: leaving out the defaults might create unexpected behaviours -#sudo_flags = -H -S -n - -# SSH timeout -#timeout = 10 - -# default user to use for playbooks if user is not specified -# (/usr/bin/ansible will use current user as default) -#remote_user = root - -# logging is off by default unless this path is defined -# if so defined, consider logrotate -#log_path = /var/log/ansible.log - -# default module name for /usr/bin/ansible -#module_name = command - -# use this shell for commands executed under sudo -# you may need to change this to bin/bash in rare instances -# if sudo is constrained -#executable = /bin/sh - -# if inventory variables overlap, does the higher precedence one win -# or are hash values merged together? The default is 'replace' but -# this can also be set to 'merge'. -#hash_behaviour = replace - -# by default, variables from roles will be visible in the global variable -# scope. To prevent this, the following option can be enabled, and only -# tasks and handlers within the role will see the variables there -#private_role_vars = yes - -# list any Jinja2 extensions to enable here: -#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n - -# if set, always use this private key file for authentication, same as -# if passing --private-key to ansible or ansible-playbook -#private_key_file = /path/to/file - -# If set, configures the path to the Vault password file as an alternative to -# specifying --vault-password-file on the command line. -#vault_password_file = /path/to/vault_password_file - -# format of string {{ ansible_managed }} available within Jinja2 -# templates indicates to users editing templates files will be replaced. 
-# replacing {file}, {host} and {uid} and strftime codes with proper values. -#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} -# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence -# in some situations so the default is a static string: -#ansible_managed = Ansible managed - -# by default, ansible-playbook will display "Skipping [host]" if it determines a task -# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" -# messages. NOTE: the task header will still be shown regardless of whether or not the -# task is skipped. -#display_skipped_hosts = True - -# by default, if a task in a playbook does not include a name: field then -# ansible-playbook will construct a header that includes the task's action but -# not the task's args. This is a security feature because ansible cannot know -# if the *module* considers an argument to be no_log at the time that the -# header is printed. If your environment doesn't have a problem securing -# stdout from ansible-playbook (or you have manually specified no_log in your -# playbook on all of the tasks where you have secret information) then you can -# safely set this to True to get more informative messages. -#display_args_to_stdout = False - -# by default (as of 1.3), Ansible will raise errors when attempting to dereference -# Jinja2 variables that are not set in templates or action lines. Uncomment this line -# to revert the behavior to pre-1.3. -#error_on_undefined_vars = False - -# by default (as of 1.6), Ansible may display warnings based on the configuration of the -# system running ansible itself. This may include warnings about 3rd party packages or -# other conditions that should be resolved if possible. 
-# to disable these warnings, set the following value to False: -#system_warnings = True - -# by default (as of 1.4), Ansible may display deprecation warnings for language -# features that should no longer be used and will be removed in future versions. -# to disable these warnings, set the following value to False: -#deprecation_warnings = True - -# (as of 1.8), Ansible can optionally warn when usage of the shell and -# command module appear to be simplified by using a default Ansible module -# instead. These warnings can be silenced by adjusting the following -# setting or adding warn=yes or warn=no to the end of the command line -# parameter string. This will for example suggest using the git module -# instead of shelling out to the git command. -# command_warnings = False - - -# set plugin path directories here, separate with colons -#action_plugins = /usr/share/ansible/plugins/action -#cache_plugins = /usr/share/ansible/plugins/cache -#callback_plugins = /usr/share/ansible/plugins/callback -#connection_plugins = /usr/share/ansible/plugins/connection -#lookup_plugins = /usr/share/ansible/plugins/lookup -#inventory_plugins = /usr/share/ansible/plugins/inventory -#vars_plugins = /usr/share/ansible/plugins/vars -#filter_plugins = /usr/share/ansible/plugins/filter -#test_plugins = /usr/share/ansible/plugins/test -#strategy_plugins = /usr/share/ansible/plugins/strategy - - -# by default, ansible will use the 'linear' strategy but you may want to try -# another one -#strategy = free - -# by default callbacks are not loaded for /bin/ansible, enable this if you -# want, for example, a notification or logging callback to also apply to -# /bin/ansible runs -#bin_ansible_callbacks = False - - -# don't like cows? that's unfortunate. -# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 -#nocows = 1 - -# set which cowsay stencil you'd like to use by default. When set to 'random', -# a random stencil will be selected for each task. 
The selection will be filtered -# against the `cow_whitelist` option below. -#cow_selection = default -#cow_selection = random - -# when using the 'random' option for cowsay, stencils will be restricted to this list. -# it should be formatted as a comma-separated list with no spaces between names. -# NOTE: line continuations here are for formatting purposes only, as the INI parser -# in python does not support them. -#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ -# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ -# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www - -# don't like colors either? -# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 -#nocolor = 1 - -# if set to a persistent type (not 'memory', for example 'redis') fact values -# from previous runs in Ansible will be stored. This may be useful when -# wanting to use, for example, IP information from one group of servers -# without having to talk to them in the same playbook run to get their -# current IP information. -#fact_caching = memory - - -# retry files -# When a playbook fails by default a .retry file will be created in ~/ -# You can disable this feature by setting retry_files_enabled to False -# and you can change the location of the files by setting retry_files_save_path - -#retry_files_enabled = False -#retry_files_save_path = ~/.ansible-retry - -# squash actions -# Ansible can optimise actions that call modules with list parameters -# when looping. Instead of calling the module once per with_ item, the -# module is called once with all items at once. Currently this only works -# under limited circumstances, and only with parameters named 'name'. 
-#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper - -# prevents logging of task data, off by default -#no_log = False - -# prevents logging of tasks, but only on the targets, data is still logged on the master/controller -#no_target_syslog = False - -# controls whether Ansible will raise an error or warning if a task has no -# choice but to create world readable temporary files to execute a module on -# the remote machine. This option is False by default for security. Users may -# turn this on to have behaviour more like Ansible prior to 2.1.x. See -# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user -# for more secure ways to fix this than enabling this option. -#allow_world_readable_tmpfiles = False - -# controls the compression level of variables sent to -# worker processes. At the default of 0, no compression -# is used. This value must be an integer from 0 to 9. -#var_compression_level = 9 - -# controls what compression method is used for new-style ansible modules when -# they are sent to the remote system. The compression types depend on having -# support compiled into both the controller's python and the client's python. -# The names should match with the python Zipfile compression types: -# * ZIP_STORED (no compression. available everywhere) -# * ZIP_DEFLATED (uses zlib, the default) -# These values may be set per host via the ansible_module_compression inventory -# variable -#module_compression = 'ZIP_DEFLATED' - -# This controls the cutoff point (in bytes) on --diff for files -# set to 0 for unlimited (RAM may suffer!). -#max_diff_size = 1048576 - -# This controls how ansible handles multiple --tags and --skip-tags arguments -# on the CLI. If this is True then multiple arguments are merged together. If -# it is False, then the last specified argument is used and the others are ignored. 
-#merge_multiple_cli_flags = False - -# Controls showing custom stats at the end, off by default -#show_custom_stats = True - -# Controlls which files to ignore when using a directory as inventory with -# possibly multiple sources (both static and dynamic) -#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo - -[privilege_escalation] -become=True -become_method=sudo -#become_user=root -#become_ask_pass=False - -[paramiko_connection] - -# uncomment this line to cause the paramiko connection plugin to not record new host -# keys encountered. Increases performance on new host additions. Setting works independently of the -# host key checking setting above. -#record_host_keys=False - -# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this -# line to disable this behaviour. -#pty=False - -[ssh_connection] - -# ssh arguments to use -# Leaving off ControlPersist will result in poor performance, so use -# paramiko on older platforms rather than removing it, -C controls compression use -#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s - -# The base directory for the ControlPath sockets. -# This is the "%(directory)s" in the control_path option -# -# Example: -# control_path_dir = /tmp/.ansible/cp -#control_path_dir = ~/.ansible/cp - -# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname, -# port and username (empty string in the config). The hash mitigates a common problem users -# found with long hostames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format. -# In those cases, a "too long for Unix domain socket" ssh error would occur. -# -# Example: -# control_path = %(directory)s/%%h-%%r -#control_path = - -# Enabling pipelining reduces the number of SSH operations required to -# execute a module on the remote server. 
This can result in a significant -# performance improvement when enabled, however when using "sudo:" you must -# first disable 'requiretty' in /etc/sudoers -# -# By default, this option is disabled to preserve compatibility with -# sudoers configurations that have requiretty (the default on many distros). -# -#pipelining = False - -# Control the mechanism for transferring files (old) -# * smart = try sftp and then try scp [default] -# * True = use scp only -# * False = use sftp only -#scp_if_ssh = smart - -# Control the mechanism for transferring files (new) -# If set, this will override the scp_if_ssh option -# * sftp = use sftp to transfer files -# * scp = use scp to transfer files -# * piped = use 'dd' over SSH to transfer files -# * smart = try sftp, scp, and piped, in that order [default] -#transfer_method = smart - -# if False, sftp will not use batch mode to transfer files. This may cause some -# types of file transfer failures impossible to catch however, and should -# only be disabled if your sftp version has problems with batch mode -#sftp_batch_mode = False - -[accelerate] -#accelerate_port = 5099 -#accelerate_timeout = 30 -#accelerate_connect_timeout = 5.0 - -# The daemon timeout is measured in minutes. This time is measured -# from the last activity to the accelerate daemon. -#accelerate_daemon_timeout = 30 - -# If set to yes, accelerate_multi_key will allow multiple -# private keys to be uploaded to it, though each user must -# have access to the system via SSH to add a new key. The default -# is "no". -#accelerate_multi_key = yes - -[selinux] -# file systems that require special treatment when dealing with security context -# the default behaviour that copies the existing context or uses the user default -# needs to be changed to use the file system dependent context. -#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p - -# Set this to yes to allow libvirt_lxc connections to work without SELinux. 
-#libvirt_lxc_noseclabel = yes - -[colors] -#highlight = white -#verbose = blue -#warn = bright purple -#error = red -#debug = dark gray -#deprecate = purple -#skip = cyan -#unreachable = red -#ok = green -#changed = yellow -#diff_add = green -#diff_remove = red -#diff_lines = cyan - - -[diff] -# Always print diff when running ( same as always running with -D/--diff ) -# always = no - -# Set how many context lines to show in diff -# context = 3 diff --git a/bin/deploy_rock.sh b/bin/deploy_rock.sh index 7f3cf28ac..cd12c4438 100755 --- a/bin/deploy_rock.sh +++ b/bin/deploy_rock.sh @@ -1,13 +1,13 @@ #!/bin/bash SCRIPT_PATH=$(dirname $(readlink -f $0)) -TOPLEVEL=$(dirname ${SCRIPT_PATH}) +ROCK_HOME=/usr/share/rock VERBOSE_FLAGS= if [ "x${DEBUG}" != "x" ]; then VERBOSE_FLAGS="-vvv" fi -cd ${TOPLEVEL}/playbooks -ansible-playbook "${TOPLEVEL}/playbooks/site.yml" ${VERBOSE_FLAGS} +cd ${ROCK_HOME}/playbooks +ansible-playbook "${ROCK_HOME}/playbooks/site.yml" ${VERBOSE_FLAGS} if [ $? -eq 0 ]; then cat << 'EOF' diff --git a/bin/generate_defaults.sh b/bin/generate_defaults.sh index db880057e..7e9da4f37 100755 --- a/bin/generate_defaults.sh +++ b/bin/generate_defaults.sh @@ -1,22 +1,22 @@ #!/bin/bash SCRIPT_PATH=$(dirname $(readlink -f $0)) -TOPLEVEL=$(dirname ${SCRIPT_PATH}) +ROCK_HOME=/usr/share/rock -cd ${TOPLEVEL}/playbooks +cd ${ROCK_HOME}/playbooks # Check for /srv/rocknsm/repodata/repomd.xml.asc and set GPG checking bool: if [[ -f /srv/rocknsm/repodata/repomd.xml.asc ]]; then echo "Signing data for local repo found. Enabling GPG checking." - sed -i 's|rock_offline_gpgcheck: .*|rock_offline_gpgcheck: 1|' ${TOPLEVEL}/playbooks/group_vars/all.yml + sed -i 's|rock_offline_gpgcheck: .*|rock_offline_gpgcheck: 1|' ${ROCK_HOME}/playbooks/group_vars/all.yml sed -i 's|rock_offline_gpgcheck: .*|rock_offline_gpgcheck: 1|' /etc/rocknsm/config.yml else echo "No signing data for local repo found. Disabling GPG checking." 
- sed -i 's|rock_offline_gpgcheck: .*|rock_offline_gpgcheck: 0|' ${TOPLEVEL}/playbooks/group_vars/all.yml + sed -i 's|rock_offline_gpgcheck: .*|rock_offline_gpgcheck: 0|' ${ROCK_HOME}/playbooks/group_vars/all.yml sed -i 's|rock_offline_gpgcheck: .*|rock_offline_gpgcheck: 0|' /etc/rocknsm/config.yml fi -ansible-playbook "${TOPLEVEL}/playbooks/generate-defaults.yml" +ansible-playbook "${ROCK_HOME}/playbooks/generate-defaults.yml" retVal=$? if [ $retVal -ne 0 ]; then echo "Dumping default variables failed! Verify you can run sudo without a password." 1>&2 diff --git a/bin/reset_data.sh b/bin/reset_data.sh index a6a083507..c38c754ad 100755 --- a/bin/reset_data.sh +++ b/bin/reset_data.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_PATH=$(dirname $(readlink -f $0)) -TOPLEVEL=$(dirname ${SCRIPT_PATH}) +ROCK_HOME=/usr/share/rock VERBOSE_FLAGS= if [ "x${DEBUG}" != "x" ]; then VERBOSE_FLAGS="-vvv" @@ -15,9 +15,9 @@ then echo "Stopping Rock Services" -cd ${TOPLEVEL}/playbooks -ansible-playbook "${TOPLEVEL}/playbooks/delete-data.yml" ${VERBOSE_FLAGS} -ansible-playbook "${TOPLEVEL}/playbooks/deploy-rock.yml" ${VERBOSE_FLAGS} +cd ${ROCK_HOME}/playbooks +ansible-playbook "${ROCK_HOME}/playbooks/delete-data.yml" ${VERBOSE_FLAGS} +ansible-playbook "${ROCK_HOME}/playbooks/deploy-rock.yml" ${VERBOSE_FLAGS} -/sbin/rock_start -fi \ No newline at end of file +/usr/local/bin/rockctl start +fi diff --git a/book.json b/book.json deleted file mode 100644 index 482c07eba..000000000 --- a/book.json +++ /dev/null @@ -1,19 +0,0 @@ -{ -"root": "./docs/guide", -"plugins": [ -"addcssjs" -], -"structure": { -"readme": "README.adoc", -"summary": "SUMMARY.adoc" -}, -"pdf": { -"paperSize": "letter" -}, -"pluginsConfig": { -"addcssjs": { -"js": ["styles/adoc/adoc.js"], -"css": ["styles/adoc/adoc.css"] -} -} -} diff --git a/docs/ascii_logo.monopic b/docs/ascii_logo.monopic deleted file mode 100644 index 609816ec2..000000000 Binary files a/docs/ascii_logo.monopic and /dev/null differ diff --git 
a/docs/guide/README.adoc b/docs/guide/README.adoc deleted file mode 100644 index 718be6c23..000000000 --- a/docs/guide/README.adoc +++ /dev/null @@ -1,233 +0,0 @@ -= Response Operation Collections Kit -Derek Ditch ; Jeff Geiger -:icons: font -:experimental: - -This build was created and tested using CentOS 7.3. I pretty much guarantee that it won't work with anything else other than RHEL 7. Unless you have an operational need, I would suggest basing your system off of CentOS 7.3 (build 1611), as that is where the bulk of the testing of this has happened. - -NOTE: If your build isn't working, please review the <> and <>. The auto-generation script has to make some assumptions. - -**BE ADVISED:** This build process takes 3-10 minutes depending on your underlying hardware. There will be times where it seems like it quit. Be patient. You'll know when it's done, for better or worse. - -== Getting Started - -If you'd like to cut through the fluff and get going, check out the link:docs/guide/getting-started.adoc[ROCK 2.0 Getting Started Guide]. - -[[hardware-requirements]] -== Hardware Requirements -_(For anything other than a Vagrant build)_ - -NOTE: This is a shadow of a recommendation of a guideline. Your mileage may vary. No returns or refunds. - -|=== -| Resource | Recommendation - -| CPU -| 4 or more physical cores. - -| Memory -| 16GB (You can get away with 8GB, but it won't collect for long.) - -| Storage -| 256GB, with 200+ of that dedicated to `/data`. Honestly, throw everything you can at it. The higher the IOPS the better. - -| Network -| The system needs at least 2 network interfaces, one for management and one for collection. -|=== - -**GOLDEN RULE:** If you throw hardware at it, ROCK will use it. It will require some tuning to do so, but we'll continue documenting that. - -[[autodetect-assumptions]] -== Autodetect Assumptions - -When writing the scripts to generate default values, we had to make some assumptions. 
The defaults are generated according to these assumptions and should generally work if your sensor aligns with them. That said, these assumptions will give you a working sensor, but may need some love for higher performance. If you cannot meet these assumptions, look at the indicated configuration variables in `/etc/rocknsm/config.yml` for workaround approaches (with impact on performance). - -* You have two network interfaces: -** A management interface with a default route -** An interface without a default route (defined by `rock_monifs`) - -TIP: We assume that any interface that does not have a default route will be used for collection. Each sensor application will be configured accordingly. - -WARNING: This so far has been the number one problem with a fresh install for beta testers!! Check your interface configuration!! - -* You have mounted your largest storage volume(s) under `/data/` (defined by `rock_data_dir`) -* Your hostname (FQDN) is defined in the `ansible/inventory/all-in-one.ini` file. -* You allow management via SSH from any network (defined by `rock_mgmt_nets`) -* You wish to use Bro, Suricata, Stenographer (disabled by default) and the whole data pipeline. (See `with_*` options) -* If installed via ISO, you will perform an offline install, else we assume online (defined by `rock_online_install`)j -* Bro will use half of your CPU resources, up to 8 CPUs - -== Differences in ROCK 2.0 - -Lots has changed in the past year or so since we released the initial Chef build. First and foremost, the build now uses Ansible for the automation. Most packages are updated to the latest greatest, and we've swapped in Suricata and the default signature IDS over Snort. Some of the most significant upgrade, from a use standpoint, is the work we've put into Kibana. The data model is slightly different, to the advantage of the analyst. - -For more details, see the link:#[Release Notes] once we make them available. 
- -For more on the data model, see link:#[Data Model] once we document that. - - -== Vagrant - -This Vagrantfile is configured to give the VM 8GB of RAM. If your system can't do that you should buy a new system or adjust the `vm.memory` value. Anything below 8 is going to run like poopoo. You will also need to have a host-only adapter configured named `vboxnet0`. - -``` -git clone https://github.com/rocknsm/rock.git -cd rock -vagrant up -``` - -== Physical/Virtual/Non-Vagrant - -The system you run this on should have at least 2 network interfaces and more than 8GB of RAM, with an OS (RHEL or CentOS 7) already installed. See the <> section for details. -``` -sudo yum update -y && reboot -sudo yum install -y epel-release -sudo yum install -y git ansible -git clone https://github.com/rocknsm/rock.git -cd rock/ansible -sudo ./generate_defaults.sh -sudo ./deploy_rock.sh -``` - -== Usage - -=== Start / Stop / Status -Accomplished with `rock_stop`, `rock_start`, and `rock_status`. - -TIP: These may need to be prefaced with /usr/local/bin/ depending on your PATH. - -`sudo rock_stop` -``` -[root@simplerockbuild ~]# rock_stop -Stopping Bro... -stopping worker-1-1 ... -stopping worker-1-2 ... -stopping proxy-1 ... -stopping manager ... -Stopping Logstash... -Stopping Kibana... -Stopping Elasticsearch... -Stopping Kafka... -Stopping Zookeeper... -``` - -`sudo rock_start` -``` -[root@simplerockbuild ~]# rock_start -Starting Zookeeper... -Active: active (running) since Wed 2015-12-02 17:12:02 UTC; 5s ago -Starting Elasticsearch... -Active: active (running) since Wed 2015-12-02 17:12:07 UTC; 5s ago -Starting Kafka... -Active: active (running) since Wed 2015-12-02 17:12:12 UTC; 5s ago -Starting Logstash... -Active: active (running) since Wed 2015-12-02 17:12:17 UTC; 5s ago -Starting Kibana... -Active: active (running) since Wed 2015-12-02 17:12:22 UTC; 5s ago -Starting Bro... -removing old policies in /data/bro/spool/installed-scripts-do-not-touch/site ... 
-removing old policies in /data/bro/spool/installed-scripts-do-not-touch/auto ... -creating policy directories ... -installing site policies ... -generating cluster-layout.bro ... -generating local-networks.bro ... -generating broctl-config.bro ... -generating broctl-config.sh ... -updating nodes ... -manager scripts are ok. -proxy-1 scripts are ok. -worker-1-1 scripts are ok. -worker-1-2 scripts are ok. -starting manager ... -starting proxy-1 ... -starting worker-1-1 ... -starting worker-1-2 ... -Getting process status ... -Getting peer status ... -Name Type Host Status Pid Peers Started -manager manager localhost running 20389 ??? 02 Dec 17:12:34 -proxy-1 proxy localhost running 20438 ??? 02 Dec 17:12:35 -worker-1-1 worker localhost running 20484 ??? 02 Dec 17:12:36 -worker-1-2 worker localhost running 20485 ??? 02 Dec 17:12:36 -``` - -`sudo rock_status` -``` -[root@simplerockbuild ~]# /usr/local/bin/rock_status -✓ Check each monitor interface is live -✓ Check for interface errors -✓ Check monitor interface for tx packets -✓ Check PF_RING settings -✓ Check that broctl is running -✓ Check for bro-detected packet loss -✓ Check that zookeeper is running -✓ Check that zookeeper is listening -✓ Check that client can connect to zookeeper -✓ Check that kafka is running -✓ Check that kafka is connected to zookeeper -✓ Check that logstash is running -✓ Check that elasticsearch is running -✓ Check that kibana is running - -14 tests, 0 failures -``` - -== Basic Troubleshooting -=== Functions Check: -``` -# After the initial build, the ES cluster will be yellow because the marvel index will think it's missing a replica. Run this to fix this issue. This job will run from cron just after midnight every day. -/usr/local/bin/es_cleanup.sh 2>&1 > /dev/null - -# Check to see that the ES cluster says it's green: -curl -s localhost:9200/_cluster/health | jq '.' - -# See how many documents are in the indexes. The count should be non-zero. -curl -s localhost:9200/_all/_count | jq '.' 
- -# You can fire some traffic across the sensor at this point to see if it's collecting. -# NOTE: This requires that you upload your own test PCAP to the box. -sudo tcpreplay -i [your monitor interface] /path/to/a/test.pcap - -# After replaying some traffic, or just waiting a bit, the count should be going up. -curl -s localhost:9200/_all/_count | jq '.' - -# You should have plain text bro logs showing up in /data/bro/logs/current/: -ls -ltr /data/bro/logs/current/ - -# Kafkacat is your kafka swiss army knife. This command will consume the current queue. You should see a non-zero offset. -kafkacat -C -b localhost -t bro_raw -e | wc -l - -# If you haven't loaded kibana already, it should be running on port 5601. This just verifies while you're still on the command line. -sudo netstat -planet | grep node -``` - -=== Key web interfaces: -IPADDRESS = The management interface of the box, or "localhost" if you did the vagrant build. - -http://IPADDRESS - Kibana - - -=== Full Packet Capture -Google's Stenographer is installed and configured in this build. However, it is disabled by default. There are a few reasons for this: First, it can be too much for Vagrant builds on meager hardware. Second, you really need to make sure you've mounted /data over sufficient storage before you start saving full packets. Once you're ready to get nuts, enable and start the service with `systemctl enable stenographer.service` and then `systemctl start stenographer.service`. Stenographer is already stubbed into the `/usr/local/bin/rock_{start,stop,status}` scripts, you just need to uncomment it if you're going to use it. - -=== File Scanning Framework -Emerson Electric Co's File Scanning Framework is installed and configured in this build to analyze files seen by bro that are of specific mime-types, however this service is disabled by default. There are two primary reasons for this: First, just like stenographer FSF can be too much for ROCK builds on meager hardware. 
Second, you should carefully consider what file types you want to extract and what additional yara rules you want to scan your extracted files with. If you choose to Enable FSF in /etc/rocknsm/config.yml, the default configuration will automatically scan any of the following file types seen by bro and log the results to Elasticsearch. - - application/pdf - - application/vnd.openxmlformats-officedocument.wordprocessingml.document - - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet - - application/vnd.openxmlformats-officedocument.presentationml.presentation - - application/x-dosexec - - application/java-archive - - application/x-java-applet - - application/x-java-jnlp-file - - -== THANKS -This architecture is made possible by the efforts of the Missouri National Guard Cyber Team for donating talent and resources to further development. - - -== Approach - -The Ansible playbook that drives this build strives not to use any external roles or other dependencies. The reasoning behind this is to make the rock playbook a "one-stop" reference for a manual build. This allows users to use the build process as a guide when doing larger scale production roll outs without having to decipher a labyrinth of dependencies. - -Templated config files have comment sections added near key config items with useful info. They don't all have it, but they get added as remembered. diff --git a/docs/guide/SUMMARY.adoc b/docs/guide/SUMMARY.adoc deleted file mode 100644 index 548ca891f..000000000 --- a/docs/guide/SUMMARY.adoc +++ /dev/null @@ -1,4 +0,0 @@ -. link:getting-started.adoc[Getting Started] -. link:release-notes.adoc[Release Notes] -. link:configuration.adoc[Configuration] - diff --git a/docs/guide/configuration.adoc b/docs/guide/configuration.adoc deleted file mode 100644 index f2fb7854c..000000000 --- a/docs/guide/configuration.adoc +++ /dev/null @@ -1,135 +0,0 @@ -= Configuration - -Edit `/etc/rocknsm/config.yml` to suit your needs. 
For simple all-in-one installs, the main points to configure are - -``` -sudo vim /etc/rocknsm/config.yml -``` - -Generally, most of the options you'll want to change are near the top. The file is fairly well commented. The defaults are auto-generated according to our best guess at your hardware with some simple rules of thumb. That said, you could consider tweaking them according to your performance needs. - -[options="header"] -|=== -| Name | Default | Description - -| rock_monifs -| `[eno3, eno4]` -| These interfaces will be configured in Bro, Suricata, Stenographer and our tuning script for passive collection. The default is actually auto-detected from your hardware. It defaults to selecting all ethernet interfaces on the system that are not the default gateway (i.e. the management interface). - -| rock_hostname -| `simplerockbuild` -| This will be used in all places where the hostname is needed. It will also be placed in `/etc/hosts` - -| rock_fqdn -| `simplerockbuild.simplerock.lan` -| This will be set in `/etc/hosts` to provide FQDN lookup within the sensor - -| bro_cpu -| Half of available cores, not to exceed 8 -| This will allocate this many CPUs to bro as worker nodes - -| es_cluster_name -| `rocknsm` -| Elasticsearch will be configured to use this as the cluster name. - -| es_node_name -| _rock_hostname_ -| This will appear in Elasticsearch runtime information as the node name. - -| es_mem -| Half of available memory, rounded down to the nearest gigabyte -| This is used to configure the heap size for Elasticsearch. - -| rock_online_install -| `True` (unless installed from ISO) -| Configures yum and other downloads to pull packages from the Internet. If this is set to `False`, you should have already followed to procedures to make an <> - -| epel_baseurl -| _the default url for EPEL_ -| You can change this to point to a local yum mirror for EPEL if you have one. 
- -| epel_gpgurl -| _the default url for the EPEL gpg key -| You should change this if your sensor cannot reach the Internet and you have a copy of the EPEL GPG Key on your internal package mirror - -| elastic_baseurl -| _the upstream Elastic yum repository_ -| You can change this to point to a local yum mirror for Elastic if you have one - -| elastic_gpgurl -| _the default url for the Elastic GPG key_ -| You should change this if your sensor cannot reach the Internet and you have a copy of the Elastic GPG Key on your internal package mirror - -| rocknsm_baseurl -| _the online rocknsm repo_ -| You can change this to point to a local yum mirror for ROCK NSM packages if you have one - -| rocknsm_gpgurl -| _the default url for the ROCK NSM key_ -| You should change this if your sensor cannot reach the Internet and you have a copy of the ROCK NSM GPG Key on your internal package mirror - -| rocknsm_local_baseurl -| `/srv/rocknsm` -| This is the location for a local offline yum repository and a local file cache for file archives used during the deployment. Even when installing online this is used to store downloaded files. - -| bro_rockscripts_repo -| _our github repo for bro scripts_ -| You could change this to your own fork of our bro scripts or a local git repository. - -| with_stenographer -| `True` -| Determines whether Stenographer will be installed as the PCAP capture engine. - -| with_bro -| `True` -| Determines whether Bro will be installed and configured. - -| with_suricata -| `True` -| Determines whether Suricata will be installed and configured. - -*NOTE*: While you theoretically _could_ install both Suricata and Snort, I guarantee that the automated deployment will not configure this as you were hoping. - -| with_fsf -| `True` -| Determines whether FSF will be installed and configured. - -| with_snort -| `False` -| Determines whether Snort will be installed and configured. 
- -*NOTE*: While you theoretically _could_ install both Snort and Suricata, I guarantee that the automated deployment will not configure this as you were hoping. - -| with_pulledpork -| `True` -| Deterimines whether Pulled Pork will be installed and configured for your IDS engine of choice (Suricata or Snort) - -| with_logstash -| `True` -| Determines whether Logstash will be installed and configured for the data pipeline. - -NOTE: I highly recommend leaving logstash even if you don't want the full ELK install. We've put a lot of work into processing the Bro logs with Logstash. If You'd like to split the data out to Splunk or some other SEIM, consider taking the feed after Logstash has processed it. See <> for more discussion on options here. - -| with_elasticsearch -| `True` -| Determines whether Elasticsearch will be installed and configured. See <> for more discussion on options here. - -| with_kibana -| `True` -| Determines whether Kibana will be installed and configured. See <> for more discussion on options here. - -| with_zookeeper -| `True` -| Determines whether Zookeeper will be installed and configured. See <> for more discussion on options here. - -*NOTE*: Zookeeper is required to run Kafka, so if you install Kafka you should leave this to true, or be willing to manually configure Kafka to point to another Zookeeper. - -| with_kafka -| `True` -| Determines whether Kafka will be installed and configured. If `False`, the Bro configuration will be adjusted to disable Kafka logging. - -| with_nginx -| `True` -| Determines whether Nginx will be installed and configured as the Kibana proxy. Nginx provides a control point that can provide for authentication enforcement to access Kibana. 
- -|=== diff --git a/docs/guide/getting-started.adoc b/docs/guide/getting-started.adoc deleted file mode 100644 index b78a75ca5..000000000 --- a/docs/guide/getting-started.adoc +++ /dev/null @@ -1,134 +0,0 @@ -= Getting Started with ROCK 2.1 - -In an effort to get this into hands of people that will break it and or make it useful, I'm sharing this ISO and some notes about how to use it. Note this is a work in progress, and I will build upon these notes to make what will ultimately be the release notes. - -Last caveat, there's nothing secret here. Everything on the ISO is available in a repo, including the build scripts. I'm not going to go into how to build this, but a curious little rhino could likely figure it out without too much trouble poking around the source tree. - -== TL;DR; - -Download the ISO indicated in <>, complete the installation and reboot. Upon login, run `/opt/rocknsm/rock/ansible/deploy_rock.sh` to accept all the defaults. - -Read on for more details and configuration options. - -== Ch-cha-cha-changes - -> Turn and face the change... - -== Installation - -=== Using the ISO - -Download the ISO from the https://github.com/rocknsm/rock/releases[Releases page] - -=== Install in VM Environment - -This section is based on using VMware Fusion on a Mac, but these steps provide a general template. I've tested the ISO booting mostly in a VMware VM, which uses BIOS. I've also burned it to a USB thumbdrive (I used the 16 GB USB3 from MicroCenter) and installed it in BIOS mode on my home test sensor. - -* Prep Install Media -** `dd` command via terminal -``` -diskutil list -diskutil unmountDisk /dev/disk# -sudo dd bs=8M if=path/to/rockiso of=/dev/disk# -``` - -NOTE: http://etcher.io[etcher.io] is a solid gui-based burning utility - -* New VM Setup - -** in the top left corner click add > new... 
then custom machine -** select the Linux > RedHat Enterprise 64 template -** create new virtual disk -** name your VM, save - -Lets customize some settings, change based on hardware available. - -* Processors & Memory -** Processors - 4 cores -** Memory - 8192MB (8GB) - -* Hard Disk -** increase the disk to 20GB -** customize settings -** save as name - -* Network Adapter -** By default the vm is created with one interface - this will be for management. -** lets add a second (listening) interface: -*** add device (top right), net adapter, add, “private to my mac” - -* Boot device - -** click CD/DVD (IDE) -** check the "Connect CD/DVD Drive" box -** expand advanced options and browse to the latest ROCK iso - -* Install - -Once the above changes are made, we're ready to install: - -* click the "Start Up" button while holding the `esc` key -* hit `tab` for full config options -** add the following values, speparated by spaces: + -*** `biosdevname=0` + -*** `net.ifnames=0` This will ensure you get interface names like `eth0`. If you have physical hardware, I _highly_ recommend that you do not use this function. + -*** `vga=773` + -* ENTER, and ROCK install script will install -* create _*admin*_ user acct -* REBOOT when install process is complete - -TIP: The `root` account is locked by default and `rockadmin` has `sudo` access. - -You're now ready for <> - -=== Install from the repo - -You can also clone the https://github.com/rocknsm/rock/[ROCK] repository. The instructions for the ISO above use a snapshot of the `devel` repo. You can clone this repo and simply run the `/opt/rocknsm/rock/ansible/generate_defaults.sh` script with `sudo`. This will generate the file `/etc/rocknsm/config.yml`. 
- -You're now ready for <> - -=== Updating - -NOTE: VMware Fusion will allow local ssh, while Virtualbox will require port forwarding - -Log in with the admin credentials used during the install process, and lets get this box current: -``` -sudo yum update -y && reboot -sudo yum install -y epel-release -sudo yum install -y git ansible -git clone https://github.com/rocknsm/rock.git -cd rock -sudo ./generate_defaults.sh -``` - -=== Configuration - - -If you wish to run an offline install (the ISO sets you up for this already) edit `/etc/rocknsm/config.yml` and change the following setting as shown: - -``` -rock_online_install: False -``` - -If this value is set to `True`, Ansible will configure your system for the yum repositories listed and pull packages and git repos directly from the URLs given. You could easily point this to local mirrors, if needed. - -If this value is set to `False`, Ansible will look for the cached files in `/srv/rocknsm`. There is another script called `offline-snapthot.sh` that will create the necessary repository and file structure. Run this from a system that is Internet connected and copy it to your sensors for offline deployment. - -While you're in there, you can change the auto-detected defaults, such as which interfaces to use, hostname, fqdn, resources to use, etc. You can also disable features altogether at the bottom by simply changing the feature value to `False` as shown below. Don't do this unless you know what you're doing. - -``` -with_nginx: False <1> -``` -<1> This disables nginx from installing or being configured. Note that it will not remove it if it is already present. - -Once you've completed flipping the bits as you see fit, simply run `/opt/rocknsm/rock/ansible/deploy_rock.sh`. If everything is well, this should install all the components and give you a success banner. - -== Known Issues - -There's some listed on GitHub. - -. Best practice custom partitioning? -. Insufficient documentation -. 
Still doing integration work with FSF -. Still working on some additional health checks -. What have you found??? diff --git a/docs/guide/release-notes.adoc b/docs/guide/release-notes.adoc deleted file mode 100644 index e6201da16..000000000 --- a/docs/guide/release-notes.adoc +++ /dev/null @@ -1,25 +0,0 @@ -= Release Notes - -Some of the biggest changes with ROCK 2.0 are upgrading all the software to the latest versions. Here's a list. - -[options="header"] -|=== -| Software | Version | Notes -| CentOS | 7.3 (1611) | -| Bro | 2.5 | Plugins for kafka output and af_packet capture -| Suricata | 3.1.3 | _This is now the default signature-based IDS_ -| Snort | 2.9.8.3 | _This is now an optional replacement for suricata_ -| Stenographer | Git 12106b | -| Kafka | 0.10.0.0 | -| Elasticsearch | 5.1.1 | -| Logstash | 5.1.1 | -| Kibana | 5.1.1 | -| Ansible | 2.2.0.0 | -|=== - -On top of software updates, we also changed the deployment mechanism to using Ansible as the primary mechanism. We did this for a few reasons: I used it for one of my full-time projects, it's super lightweight and available in EPEL, doesn't require an agent, super easy to understand. I'm hoping that ultimately this makes the platform more approachable to a wider community and better suitable to offline or isolated install environments, which I've frequently encountered for sensor networks. - -On that last note, we now have an ISO that _should_ contain everything you need to deploy. The ISO is merely a snapshot of packages available at the time and latest snapshot of various Git repositories. 
- - - diff --git a/etc/config.yml b/etc/config.yml new file mode 100644 index 000000000..066336cf8 --- /dev/null +++ b/etc/config.yml @@ -0,0 +1,25 @@ + +--- +############################################################################### +# :::==== :::==== :::===== ::: === :::= === :::=== :::======= # +# ::: === ::: === ::: ::: === :::===== ::: ::: === === # +# ======= === === === ====== ======== ===== === === === # +# === === === === === === === === ==== === === === # +# === === ====== ======= === === === === ====== === === # +############################################################################### +# This configuration file can be used to override any of the installation +# variables that affect the deployment of RockNSM. Take care when modifying +# these options. The defaults should be used unless you really know what you +# are doing! + + + +############################################################################### +# NEXT STEP: Deployment +############################################################################### +# Any time the settings in this config file are updated, you will need to re-run +# the deployment scripts to pick up the changes by running: +# +# /usr/local/bin/deploy_rock.sh +# +# For more information refer to the full documentation at: https://rocknsm.io diff --git a/etc/hosts.ini b/etc/hosts.ini new file mode 100644 index 000000000..d22f7f968 --- /dev/null +++ b/etc/hosts.ini @@ -0,0 +1,71 @@ +[rock] +simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local + +[web] +simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local + +[sensors:children] +rock + +[bro:children] +sensors + +[fsf:children] +sensors + +[kafka:children] +sensors + +[stenographer:children] +sensors + +[suricata:children] +sensors + +[zookeeper] +simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local + +[elasticsearch:children] +es_masters +es_data +es_ingest + +[es_masters] +# This group 
should only ever contain exactly 1 or 3 nodes! +simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local +# Multi-node example # +#elasticsearch0[1:3].simplerock.lan + +[es_data] +simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local +# Multi-node example # +#elasticsearch0[1:4].simplerock.lan + +[es_ingest] +simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local +# Multi-node example # +#elasticsearch0[1:4].simplerock.lan + +[elasticsearch:vars] +# Disable all node roles by default +node_master=false +node_data=false +node_ingest=false + +[es_masters:vars] +node_master=true + +[es_data:vars] +node_data=true + +[es_ingest:vars] +node_ingest=true + +[docket:children] +web + +[kibana:children] +web + +[logstash:children] +sensors diff --git a/playbooks/ansible.cfg b/playbooks/ansible.cfg index d16a407fa..a5d579e1c 100644 --- a/playbooks/ansible.cfg +++ b/playbooks/ansible.cfg @@ -9,9 +9,15 @@ [defaults] +# Abort the entire play if a task fails. This is helpful to prevent cascading +# failures when the play continues running on remaining hosts. + +any_errors_fatal = True +inventory = /etc/rocknsm/hosts.ini +roles_path = /usr/share/rock/roles + # some basic default values... -inventory = inventory/all-in-one.ini #library = /usr/share/my_modules/ #module_utils = /usr/share/my_module_utils/ #remote_tmp = ~/.ansible/tmp @@ -54,9 +60,6 @@ inventory = inventory/all-in-one.ini # environment. 
# gather_timeout = 10 -# additional paths to search for roles in, colon separated -roles_path = roles - # uncomment this to disable SSH key host checking #host_key_checking = False diff --git a/playbooks/auth-mgmt.yml b/playbooks/auth-mgmt.yml new file mode 100644 index 000000000..91397ac39 --- /dev/null +++ b/playbooks/auth-mgmt.yml @@ -0,0 +1,19 @@ +--- + +- hosts: all + become: true + vars_files: + - /etc/rocknsm/config.yml + tasks: + - name: Set authorized keys + authorized_key: + user: "{{ ansible_env.SUDO_USER }}" + state: present + key: "{{ public_keys }}" + + - name: Enable sudo w/o password + lineinfile: + path: /etc/sudoers + state: present + regexp: '^{{ ansible_env.SUDO_USER }}\s' + line: '{{ ansible_env.SUDO_USER }} ALL=(ALL) NOPASSWD: ALL' diff --git a/playbooks/debug.yml b/playbooks/debug.yml index dfac6ae9c..b172da745 100644 --- a/playbooks/debug.yml +++ b/playbooks/debug.yml @@ -10,8 +10,6 @@ - debug: var=es_mem - debug: var=bro_cpu - debug: var=rock_monifs -- debug: var=rock_hostname -- debug: var=rock_fqdn - debug: var=epel_baseurl - debug: var=epel_gpgurl - debug: var=elastic_baseurl diff --git a/playbooks/deploy-rock.yml b/playbooks/deploy-rock.yml index d39207ca4..b4228814a 100644 --- a/playbooks/deploy-rock.yml +++ b/playbooks/deploy-rock.yml @@ -1,1172 +1,90 @@ --- -# Everything that needs to satisfy dependencies should be run in this play -- hosts: all - vars: - rock_debug: "{{ lookup('env', 'DEBUG') }}" - http_proxy: "{{ lookup('env','http_proxy') }}" - https_proxy: "{{ lookup('env', 'https_proxy') }}" - roles: - - { role: sensor-common } - hosts: all - gather_facts: False - vars: - rock_debug: "{{ lookup('env', 'DEBUG') }}" - http_proxy: "{{ lookup('env','http_proxy') }}" - https_proxy: "{{ lookup('env', 'https_proxy') }}" + vars_files: + - /etc/rocknsm/config.yml roles: - - role: stenographer - when: with_stenographer | bool - stenographer_monitor_interfaces: "{{rock_monifs}}" - - role: docket - when: with_docket | bool - docket_install: 
offline - docket_enable: "{{enable_docket | bool}}" - - tasks: - - name: Apply override settings, if available - include_vars: /etc/rocknsm/config.yml - ignore_errors: true - failed_when: false - - - name: Debug variables - import_tasks: debug.yml - when: rock_debug is defined and rock_debug - - ####################################################### - ############# Install/Remove Packages ################# - ####################################################### - - name: Install packages - yum: - name: "{{ item.pkg }}" - state: "{{ item.state }}" - when: (item.test is undefined) or (item.test) - with_items: - - { pkg: "{{ rocknsm_package_list }}", state: installed } - - - name: Install optional packages - yum: - name: "{{ item.pkg }}" - state: "{{ item.state }}" - when: (item.test is undefined) or (item.test) - with_items: - - { pkg: elasticsearch, test: "{{with_elasticsearch}}", state: installed } - - { pkg: logstash, test: "{{with_logstash}}", state: installed } - - { pkg: kibana, test: "{{with_kibana}}", state: installed } - - { pkg: filebeat, test: "{{with_suricata or with_fsf}}", state: installed } - - { pkg: lighttpd, test: "{{with_lighttpd}}", state: installed } - - { pkg: python2-xkcdpass, test: "{{with_lighttpd}}", state: installed } - - { pkg: bro, test: "{{with_bro}}", state: installed } - - { pkg: bro-plugin-af_packet, test: "{{with_bro}}", state: installed } - - { pkg: bro-plugin-kafka, test: "{{(with_bro and with_kafka)}}", state: installed } - - { pkg: docket, test: "{{with_docket}}", state: installed } - - { pkg: suricata, test: "{{with_suricata}}", state: installed } - - { pkg: suricata-update, test: "{{with_suricata}}", state: installed} - - { pkg: snort, test: "{{with_snort}}", state: installed } - - { pkg: daq, test: "{{with_snort}}", state: installed } - - { pkg: zookeeper, test: "{{with_zookeeper}}", state: installed } - - { pkg: kafka, test: "{{with_kafka}}", state: installed } - - { pkg: kafkacat, test: "{{with_kafka}}", state: installed 
} - - { pkg: fsf, test: "{{with_fsf}}", state: installed } - - { pkg: postfix, state: installed } - - - name: Ensure cache directory exists - file: - dest: "{{ rock_cache_dir }}" - state: directory - mode: 0755 - - ####################################################### - ################ Configure firewall ################### - ####################################################### - - name: Enable and start firewalld - service: - name: firewalld - enabled: yes - state: started - - - name: Configure firewalld - firewalld: - port: "{{ item[1].port }}" - source: "{{ item[0] }}" - permanent: yes - state: enabled - immediate: yes - when: (item[1].test is undefined) or item[1].test - with_nested: - - "{{ rock_mgmt_nets }}" - - - - { port: "22/tcp" } - - { port: "443/tcp", test: "{{ with_kibana }}" } - - { port: "8443/tcp", test: "{{ with_docket }}" } - - ###################################################### - ############## Configure GeoIP Databases ############# - ###################################################### - - name: Configure GeoIP Update - copy: src=GeoIP.conf dest=/etc/GeoIP.conf - - # There's an issue w/ geoipupdate when env is empty - - name: Update GeoIP - shell: > - if [ "x$HTTP_PROXY" == "x" ]; then - unset HTTP_PROXY; - fi - if [ "x$http_proxy" == "x" ]; then - unset http_proxy; - fi - if [ "x$HTTPS_PROXY" == "x" ]; then - unset HTTPS_PROXY; - fi - if [ "x$https_proxy" == "x" ]; then - unset https_proxy; - fi - /usr/bin/geoipupdate - args: - creates: /usr/share/GeoIP/GeoLiteASNum.dat - register: result - failed_when: (result.rc != 0) and (result.rc != 1) - - - name: Create GeoIP symlinks - file: - src: "/usr/share/GeoIP/{{ item.src }}" - dest: "/usr/share/GeoIP/{{ item.dest }}" - force: yes - state: link - with_items: - - { src: 'GeoLiteCity.dat', dest: 'GeoIPCity.dat' } - - { src: 'GeoLiteCountry.dat', dest: 'GeoIPCountry.dat' } - - { src: 'GeoLiteASNum.dat', dest: 'GeoIPASNum.dat' } - - { src: 'GeoLiteCityv6.dat', dest: 'GeoIPCityv6.dat' } - - 
###################################################### - ################### Setup Zookeeper ################## - ###################################################### - - name: Enable and start zookeeper - service: - name: zookeeper - state: "{{ 'started' if enable_zookeeper else 'stopped' }}" - enabled: "{{ enable_zookeeper }}" - when: with_zookeeper - - ###################################################### - ##################### Setup Kafka #################### - ###################################################### - - name: Create Kafka data dir - file: - path: "{{ kafka_data_dir }}" - mode: 0755 - owner: "{{ kafka_user }}" - group: "{{ kafka_group }}" - state: directory - when: with_kafka - - - name: Set kafka retention - lineinfile: - dest: "{{ kafka_config_path }}" - regexp: "log.retention.hours=" - line: "log.retention.hours={{ kafka_retention }}" - state: present - when: with_kafka - - - name: Set kafka data dir - lineinfile: - dest: "{{ kafka_config_path }}" - regexp: "log.dirs=" - line: "log.dirs={{ kafka_data_dir }}" - when: with_kafka - - - name: Enable and start kafka - service: - name: kafka - state: "{{ 'started' if enable_kafka else 'stopped' }}" - enabled: "{{ enable_kafka }}" - when: with_kafka - - ###################################################### - ################# Setup Elasticsearch ################ - ###################################################### - - name: Create Elasticsearch directory - file: - path: "{{ es_data_dir }}" - mode: 0755 - owner: "{{ es_user }}" - group: "{{ es_group }}" - state: directory - when: with_elasticsearch - - - name: Setup elasticsearch config - template: - src: templates/elasticsearch.yml.j2 - dest: /etc/elasticsearch/elasticsearch.yml - owner: root - group: "{{ es_group }}" - mode: 0640 - when: with_elasticsearch - - - name: Create elasticsearch systemd override dir - file: - path: /etc/systemd/system/elasticsearch.service.d - owner: root - group: root - mode: 0755 - state: directory - when: 
with_elasticsearch - - - name: Enable elasticsearch memlock in service override - copy: - content: "{{ es_memlock_override }}" - dest: /etc/systemd/system/elasticsearch.service.d/override.conf - mode: 0644 - owner: root - group: root - when: with_elasticsearch - - - name: Setup elasticsearch jvm options - template: - src: templates/es-jvm.options.j2 - dest: /etc/elasticsearch/jvm.options - mode: 0640 - owner: root - group: "{{ es_group }}" - when: with_elasticsearch - - - name: Install ROCK Elasticsearch cleanup script - template: - src: templates/es_cleanup.sh.j2 - dest: /usr/local/bin/es_cleanup.sh - mode: 0755 - owner: root - group: root - when: with_elasticsearch - - - name: Set elasticsearch cleanup cron job - cron: - name: "ES maintenance" - cron_file: rocknsm_es_maintenance - hour: 0 - minute: 1 - user: root - job: /usr/local/bin/es_cleanup.sh > /dev/null 2>&1 - when: with_elasticsearch - - # TODO: This has to be started for now so that the configuration ca - # occur. In the future, we can do this in stages and expect a "running config" - # phase to execute. Which will allow an install phase, reboot, come up and - # configure the services live. We're not there yet. 
- - name: Enable and start Elasticsearch - service: - name: elasticsearch - state: "started" - enabled: "{{ enable_elasticsearch }}" - when: with_elasticsearch - notify: - - es maintenance - - - name: Wait for Elasticsearch to become ready - wait_for: host=localhost port=9200 - when: with_elasticsearch - - ###################################################### - ################### Setup Kibana ##################### - ###################################################### - # TODO: See note above on Elasticsearch - - name: Enable and start Kibana - service: - name: kibana - state: "started" - enabled: "{{ enable_kibana }}" - when: with_kibana - - - name: Check for default mapping template - uri: - method: "GET" - url: http://localhost:9200/_template/default - failed_when: False - register: default_index_template - when: with_elasticsearch - - - name: Load default Elasticsearch mapping template - uri: - method: PUT - url: http://localhost:9200/_template/default - body: "{{ lookup('file', 'es-default-mapping.json')}}" - body_format: json - when: with_elasticsearch and default_index_template.status == 404 - - - name: Configure Kibana templates - uri: - method: PUT - url: http://localhost:9200/_template/kibana-config - body: > - { "order" : 0, "template" : ".kibana", - "settings" : - { "index.number_of_replicas" : "0", - "index.number_of_shards" : "1" }, - "mappings" : { }, "aliases" : { } } - body_format: json - status_code: 200,201 - when: with_kibana - - - name: Add the kibanapw shell function - copy: - src: profile.d-kibanapw.sh - dest: /etc/profile.d/kibanapw.sh - mode: 0644 - owner: root - group: root - when: with_kibana - - - name: Set initial Kibana credentials - shell: > - export kibuser=$(getent passwd 1000 | awk -F: '{print $1}') && \ - export kibpw=$(xkcdpass -a rock) && \ - echo -e "U: ${kibuser}\nP: ${kibpw}" > /home/${kibuser}/KIBANA_CREDS.README && \ - printf "${kibuser}:$(echo ${kibpw} | openssl passwd -apr1 -stdin)\n" | \ - sudo tee -a 
/etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1 - args: - creates: /etc/lighttpd/rock-htpasswd.user - when: with_kibana and with_lighttpd - - ###################################################### - ############## Setup RockNSM dataflow ################ - ###################################################### - - - name: Download RockNSM Elastic configs - get_url: - url: "{{ rock_dashboards_url }}" - dest: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" - mode: 0644 - when: (with_kibana or with_elasticsearch or with_logstash) and rock_online_install - - - name: Extract RockNSM Elastic configs - unarchive: - src: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" - dest: /opt/rocknsm - owner: root - group: root - creates: "{{ rock_module_dir }}" - remote_src: yes - when: (with_kibana or with_elasticsearch or with_logstash) - - ############### Elasticsearch Mappings #################### - # TODO customize mappings install per sensor features (i.e. bro, suricata, etc)...maybe - # TODO: Fix `changed_when` - - name: Blanket install/update Elasticsearch mappings - command: ./import-index-templates.sh "{{ es_url }}" - args: - chdir: "{{ rock_module_dir }}/configuration/elasticsearch" - changed_when: false - when: with_elasticsearch - - ############### Logstash Config #################### - - name: Install Bro-Kafka configuration for Logstash - copy: - src: "{{rock_module_dir}}/configuration/logstash/{{item}}" - dest: "/etc/logstash/conf.d/{{item}}" - mode: 0640 - owner: "{{ logstash_user }}" - group: "{{ logstash_group }}" - remote_src: "yes" - when: with_logstash and with_bro and with_kafka - notify: Restart Logstash - with_items: - - logstash-100-input-kafka-bro.conf - - logstash-500-filter-bro.conf - - logstash-999-output-es-bro.conf - - - name: Install Suricata-Kafka configuration for Logstash - copy: - src: "{{rock_module_dir}}/configuration/logstash/{{item}}" - dest: "/etc/logstash/conf.d/{{item}}" - mode: 0640 - owner: "{{ logstash_user }}" - group: "{{ 
logstash_group }}" - remote_src: "yes" - when: with_logstash and with_suricata and with_kafka - notify: Restart Logstash - with_items: - - logstash-100-input-kafka-suricata.conf - - logstash-500-filter-suricata.conf - - logstash-999-output-es-suricata.conf - - - name: Install FSF-Kafka configuration for Logstash - copy: - src: "{{rock_module_dir}}/configuration/logstash/{{item}}" - dest: "/etc/logstash/conf.d/{{item}}" - mode: 0640 - owner: "{{ logstash_user }}" - group: "{{ logstash_group }}" - remote_src: "yes" - when: with_logstash and with_fsf and with_kafka - notify: Restart Logstash - with_items: - - logstash-100-input-kafka-fsf.conf - - logstash-500-filter-fsf.conf - - logstash-999-output-es-fsf.conf - - - name: Install Parse Failure configuration for Logstash - copy: - src: "{{rock_module_dir}}/configuration/logstash/{{item}}" - dest: "/etc/logstash/conf.d/{{item}}" - mode: 0640 - owner: "{{ logstash_user }}" - group: "{{ logstash_group }}" - remote_src: "yes" - when: with_logstash - notify: Restart Logstash - with_items: - - logstash-998-filter-parsefailures.conf - - logstash-999-output-es-parsefailures.conf - - ############### Kibana Config #################### - - name: Wait for Kibana to be available - wait_for: - port: "{{ kibana_port }}" - when: with_kibana - - - name: Blanket install/update Kibana saved objects - command: ./import-saved-items.sh "{{ kibana_url }}" - args: - chdir: "{{rock_module_dir}}/configuration/kibana" - changed_when: false - # TODO: Fix this ^^ - when: with_kibana - - # - name: Get Kibana Bro index mapping - # uri: - # method: GET - # url: "{{ kibana_url }}/api/saved_objects/index-pattern?per_page=1000" - # when: with_kibana - # register: - - ###################################################### - ################### Setup Logstash ################### - ###################################################### - - # - name: Check for Parse Failure mapping template - # uri: - # method: "GET" - # url: 
http://localhost:9200/_template/failure_index - # failed_when: False - # register: failure_mapping - # when: (with_elasticsearch and with_logstash) - # - # - name: Load Parse Failure Elasticsearch mapping templates - # uri: - # method: PUT - # url: http://localhost:9200/_template/failure_index - # body: "{{ lookup('file', 'es-parse-failures-mappings.json')}}" - # body_format: json - # when: (with_elasticsearch and with_logstash) and failure_mapping.status == 404 - - - name: Enable and start Logstash - service: - name: logstash - state: "{{ 'started' if enable_logstash else 'stopped' }}" - enabled: "{{ enable_logstash }}" - when: with_logstash - - - ###################################################### - ################### Setup Filebeat ################### - ###################################################### - - name: Add Filebeat configuration file - template: - src: filebeat.yml.j2 - dest: /etc/filebeat/filebeat.yml - notify: Restart Filebeat - - - name: Enable and start Filebeat - service: - name: filebeat - state: "{{ 'started' if enable_filebeat else 'stopped' }}" - enabled: "{{ enable_filebeat }}" - when: with_filebeat - - ####################################################### - ###################### Setup Bro ##################### - ####################################################### - - name: Create bro group - group: - name: "{{ bro_group }}" - state: present - system: yes - when: with_bro - - - name: Create bro user and group - user: - name: "{{ bro_user }}" - comment: "bro service account" - createhome: no - group: "{{ bro_group }}" - home: /var/spool/bro - shell: /sbin/nologin - system: yes - state: present - when: with_bro - - - name: Create Bro directories - file: - path: "{{ item }}" - mode: 0755 - owner: "{{ bro_user }}" - group: "{{ bro_group }}" - state: directory - setype: var_log_t - with_items: - - "{{ bro_data_dir }}" - - "{{ bro_data_dir }}/logs" - - "{{ bro_data_dir }}/spool" - when: with_bro - - - name: Create /opt/bro dir for 
wandering users - file: - dest: "/opt/bro" - state: directory - when: with_bro - - - name: Create note to wandering users - copy: - dest: "/opt/bro/README.md" - content: | - Hey! Where's my Bro? - ========================= - - RockNSM has aligned the Bro package to be inline with Fedora packaging - guidelines in an effort to push the package upstream for maintenance. - Fedora and EPEL have a great community and we believe others can benefit - from our hard work. - - Here's where you can find your stuff: - - Bro configuration files - ----------------------- - /opt/bro/etc -> /etc/bro - - Bro site scripts - ----------------------- - /opt/bro/share/bro/site -> /usr/share/bro/site - - Bro logs and spool dirs (same as previous ROCK iterations) - ----------------------- - /opt/bro/logs -> /data/bro/logs - /opt/bro/spool -> /data/bro/spool - when: with_bro - - - name: Create Bro node.cfg - template: - src: templates/bro-node.cfg.j2 - dest: "{{ bro_sysconfig_dir }}/node.cfg" - mode: 0644 - owner: root - group: root - when: with_bro - notify: reload bro - - - name: Create broctl.cfg - template: - src: templates/bro-broctl.cfg.j2 - dest: "{{ bro_sysconfig_dir }}/broctl.cfg" - mode: 0644 - owner: root - group: root - when: with_bro - notify: reload bro - - - name: Create bro networks.cfg - copy: - src: bro-networks.cfg - dest: "{{ bro_sysconfig_dir }}/networks.cfg" - mode: 0644 - owner: root - group: root - when: with_bro - notify: reload bro - - - name: Add bro custom scripts dir - file: - path: "{{ bro_site_dir }}/scripts" - owner: root - group: root - mode: 0755 - state: directory - when: with_bro - - - name: Set permissions on broctl scripts dir - file: - path: "{{ bro_prefix }}/share/broctl/scripts" - owner: "{{ bro_user }}" - group: "{{ bro_user }}" - mode: 0755 - state: directory - when: with_bro - - - name: Add README to scripts dir - copy: - src: bro-scripts-readme.txt - dest: "{{ bro_site_dir }}/scripts/README.txt" - mode: 0644 - owner: root - group: root - when: 
with_bro - - - name: Checkout ROCK Bro scripts - git: - repo: "{{ bro_rockscripts_repo }}" - dest: "{{ bro_site_dir }}/scripts/rock" - version: "{{ bro_rockscripts_branch }}" - when: with_bro and rock_online_install - - - name: Deploy offline ROCK Bro scripts - unarchive: - src: "{{ rock_cache_dir }}/{{ bro_rockscripts_filename }}" - dest: "{{ bro_site_dir }}/scripts/" - owner: root - group: root - creates: "{{ bro_site_dir }}/scripts/rock-scripts-{{ bro_rockscripts_branch | replace ('/', '-') }}" - remote_src: yes - when: with_bro and not rock_online_install - - - name: Symlink offline ROCK bro scripts - file: - src: "{{ bro_site_dir }}/scripts/rock-scripts-{{ bro_rockscripts_branch | replace ('/', '-') }}" - dest: "{{ bro_site_dir }}/scripts/rock" - state: link - force: yes - when: with_bro and not rock_online_install + - common - - name: Update owner for ROCK NSM Bro scripts - file: - path: "{{ bro_site_dir }}/scripts/rock" - owner: "{{ bro_user }}" - group: "{{ bro_group }}" - state: directory - recurse: yes - follow: yes - tags: - - bro_scripts - when: with_bro - - - name: Add ROCK scripts to local.bro - lineinfile: - dest: "{{ bro_site_dir }}/local.bro" - line: "@load scripts/rock # ROCK NSM customizations" - state: present - when: with_bro - - - name: Enable Bro Kafka output to local.bro - lineinfile: - dest: "{{ bro_site_dir }}/local.bro" - line: "@load scripts/rock/plugins/kafka" - state: present - when: with_bro and with_kafka - - - name: Add bro aliases - copy: - src: profile.d-bro.sh - dest: /etc/profile.d/bro.sh - mode: 0644 - owner: root - group: root - when: with_bro - - - name: Add broctl wrapper for admin use - copy: - src: broctl.sh - dest: /usr/sbin/broctl - mode: 0754 - owner: root - group: root - when: with_bro - - - name: Set bro capabilities - capabilities: - path: /usr/bin/bro - capability: "{{ item }}" - state: present - with_items: - - "cap_net_raw+eip" - - "cap_net_admin+eip" - when: with_bro - - - name: Set capstats capabilities - 
capabilities: - path: /usr/bin/capstats - capability: "{{ item }}" - state: present - with_items: - - "cap_net_raw+eip" - - "cap_net_admin+eip" - when: with_bro - - - name: Set broctl cron - cron: - name: "broctl maintenance" - minute: "*/5" - cron_file: rocknsm_broctl - user: "{{ bro_user }}" - job: "/usr/bin/broctl cron >/dev/null 2>&1" - when: with_bro - - - name: Initialize bro scripts for workers - command: /usr/bin/broctl install - args: - creates: "{{ bro_data_dir }}/spool/broctl-config.sh" - become: yes - become_user: "{{ bro_user }}" - when: with_bro - - - name: Enable and start broctl - service: - name: bro - enabled: "{{ enable_bro }}" - when: with_bro - notify: reload bro - - - ###################################################### - ################## Setup Suricata #################### - ###################################################### - - name: Create Suricata directories - file: - path: "{{ suricata_data_dir }}/" - mode: 0755 - owner: "{{ suricata_user }}" - group: "{{ suricata_group }}" - state: directory - setype: var_log_t - when: with_suricata - - - name: Remove suricata sysconfig file - file: - path: /etc/sysconfig/suricata - state: absent - when: with_suricata - - - name: Install suricata service files - copy: - src: "suricata.service" - dest: "/etc/systemd/system/suricata.service" - mode: 0644 - owner: root - group: root - when: with_suricata - - - name: Setup suricata tmpfiles - copy: - src: "suricata.tmpfiles" - dest: "/etc/tmpfiles.d/suricata.conf" - mode: 0644 - owner: root - group: root - when: with_suricata - - - name: Install suricata overrides - template: - src: templates/suricata_overrides.yaml.j2 - dest: /etc/suricata/rocknsm-overrides.yaml - mode: 0640 - owner: "root" - group: "{{ suricata_group }}" - when: with_suricata - - - name: Create IP reputation config dir - file: - path: /etc/suricata/rules/iplists - state: directory - owner: root - group: root - mode: 0755 - when: with_suricata - - - name: Create Suricata dirs for 
suricata-update - file: - path: "{{ suricata_var_dir }}/{{ item }}" - state: directory - owner: "{{ suricata_user }}" - group: "{{ suricata_group }}" - mode: 0755 - recurse: "yes" - when: with_suricata - with_items: - - rules - - update - - - name: Set suricata overrides include in main config - lineinfile: - dest: /etc/suricata/suricata.yaml - line: "include: rocknsm-overrides.yaml" - state: present - when: with_suricata - - - name: Enable and start suricata - service: - name: suricata - enabled: "{{ enable_suricata }}" - state: "{{ 'started' if enable_suricata else 'stopped' }}" - when: with_suricata - - - name: Configure logrotate for suricata logs - template: - src: templates/logrotate-suricata.j2 - dest: /etc/logrotate.d/suricata - mode: 0644 - owner: root - group: root - when: with_suricata - - ###################################################### - ############## Setup suricata-update ################ - ###################################################### - - - name: Create local rules source for offline install of Suricata - command: /usr/bin/suricata-update add-source "emerging-threats-offline" "file:///srv/rocknsm/support/emerging.rules-suricata.tar.gz" - args: - creates: /var/lib/suricata/update/sources/emerging-threats-offline.yaml - when: with_suricata_update and not rock_online_install - become: yes - become_user: "{{ suricata_user }}" - - - name: Offline install of Suricata rules - command: /usr/bin/suricata-update update --reload-command "/usr/bin/systemctl kill -s USR2 suricata" - args: - creates: /var/lib/suricata/rules/suricata.rules - when: enable_suricata_update and not rock_online_install - become: yes - become_user: "{{ suricata_user }}" - - - name: Update suricata-update source index - command: /usr/bin/suricata-update update-sources - args: - creates: /var/lib/suricata/update/cache/index.yaml - when: enable_suricata_update and rock_online_install - become: yes - become_user: "{{ suricata_user }}" - - - name: Explicitly enable ET rules 
for suricata-update online - command: /usr/bin/suricata-update enable-source et/open - args: - creates: /var/lib/suricata/update/sources/et-open.yaml - when: enable_suricata_update and rock_online_install - become: yes - become_user: "{{ suricata_user }}" - - - name: Suricata-update online rules pull - command: /usr/bin/suricata-update update --reload-command "/usr/bin/systemctl kill -s USR2 suricata" - args: - creates: /var/lib/suricata/rules/suricata.rules - when: enable_suricata_update and rock_online_install - become: yes - become_user: "{{ suricata_user }}" - - - name: Cron for suricata-update - cron: - name: "suricata-update" - cron_file: rocknsm_suricata-update - user: "{{ suricata_user }}" - hour: "12" - minute: "0" - job: /usr/bin/suricata-update update --reload-command "/usr/bin/systemctl kill -s USR2 suricata" - > /var/log/suricata-update.log 2>&1 - when: enable_suricata_update - - ####################################################### - ######################## FSF ########################## - ####################################################### - - name: Create FSF data dir - file: - path: "{{ fsf_data_dir }}" - mode: 0755 - owner: "{{ fsf_user }}" - group: "{{ fsf_group }}" - state: directory - setype: var_log_t - when: with_fsf - - - name: Create FSF archive dir - file: - path: "{{ fsf_archive_dir }}" - mode: 0755 - owner: "{{ fsf_user }}" - group: "{{ fsf_group }}" - state: directory - when: with_fsf - - - name: Configure logrotate for fsf logs - template: - src: templates/logrotate-fsf.j2 - dest: /etc/logrotate.d/fsf - mode: 0644 - owner: root - group: root - when: with_fsf - - - name: Configure fsf-server - template: - src: templates/fsf-server-config.j2 - dest: /opt/fsf/fsf-server/conf/config.py - owner: "{{ fsf_user }}" - group: "{{ fsf_group }}" - mode: 0644 - when: with_fsf - - - name: Configure fsf-client - template: - src: templates/fsf-client-config.j2 - dest: /opt/fsf/fsf-client/conf/config.py - owner: "{{ fsf_user }}" - group: "{{ 
fsf_group }}" - mode: 0644 - when: with_fsf - - - name: Enable and start FSF - service: - name: fsf - state: "{{ 'started' if enable_fsf else 'stopped' }}" - enabled: "{{ enable_fsf }}" - when: with_fsf - - ###################################################### - ################### Setup lighttpd ################### - ###################################################### - - name: Install ROCK lighttpd configuration - template: - src: templates/lighttpd-{{ item }}.j2 - dest: /etc/lighttpd/vhosts.d/{{ item }} - mode: 0644 - owner: root - group: root - when: with_lighttpd and with_kibana - with_items: - - 10-rock-auth.conf - - 10-tls.conf - - 20-rock-vars.conf - - 50-rockproxy.conf - notify: Enable and Restart lighttpd - - - name: Enable lighttpd vhosts - lineinfile: - path: /etc/lighttpd/lighttpd.conf - regexp: '^#?\s*include.*vhosts\.d/.*$' - line: include "/etc/lighttpd/vhosts.d/*.conf" - notify: Enable and Restart lighttpd - when: with_lighttpd - - - name: Enable lighttpd to perform proxy connect - seboolean: - name: httpd_can_network_connect - state: yes - persistent: yes - when: with_lighttpd and with_kibana - - - name: Generate sensor private key - openssl_privatekey: - path: "{{ http_tls_key }}" - when: with_kibana and with_lighttpd - notify: - - Enable and Restart lighttpd - - - name: Generate sensor public key - openssl_publickey: - path: "{{ http_tls_pub }}" - privatekey_path: "{{ http_tls_key }}" - when: with_kibana and with_lighttpd - notify: - - Enable and Restart lighttpd - - - name: Generate sensor CSR - openssl_csr: - path: "{{ http_tls_pub }}.csr" - privatekey_path: "{{ http_tls_key }}" - country_name: US - state_or_province_name: MO - locality_name: St. 
Louis - organization_name: RockNSM - organizational_unit_name: NSM Ninjas - email_address: info@rocknsm.io - common_name: "{{ rock_fqdn }}" - when: with_kibana and with_lighttpd - notify: - - Enable and Restart lighttpd - - - name: Generate sensor certificate - openssl_certificate: - path: "{{ http_tls_crt }}" - privatekey_path: "{{ http_tls_key }}" - csr_path: "{{ http_tls_pub }}.csr" - provider: selfsigned - when: with_kibana and with_lighttpd - notify: - - Enable and Restart lighttpd - - - name: Combine sensor cert and key - shell: > - cat {{http_tls_key}} {{http_tls_crt}} > {{http_tls_combined}} - args: - creates: "{{ http_tls_combined }}" - when: with_lighttpd - notify: - - Enable and Restart lighttpd - - - name: Generate DH parameters - command: > - openssl dhparam -out {{http_tls_dhparams}} 2048 - args: - creates: "{{http_tls_dhparams}}" - when: with_kibana and with_lighttpd - notify: - - Enable and Restart lighttpd - - ###################################################### - ############### Setup ROCKNSM Scripts ################ - ###################################################### - - name: Install rock start script - copy: - src: rock_start - dest: /usr/local/bin/rock_start - mode: 0700 - owner: root - group: root - - - name: Install rock stop script - copy: - src: rock_stop - dest: /usr/local/bin/rock_stop - mode: 0700 - owner: root - group: root - - - name: Install rock status script - copy: - src: rock_status - dest: /usr/local/bin/rock_status - mode: 0755 - owner: root - group: root - - - name: Install rock control script - copy: - src: rockctl - dest: /usr/local/bin/rockctl - mode: 0755 - owner: root - group: root - - - name: Create rock script symlinks - file: - src: "/usr/local/bin/{{ item.src }}" - dest: "/usr/sbin/{{ item.dest }}" - force: yes - state: link - with_items: - - { src: 'rock_start', dest: 'rock_start' } - - { src: 'rock_stop', dest: 'rock_stop' } - - { src: 'rock_status', dest: 'rock_status' } - - { src: 'rockctl', dest: 'rockctl' 
} - - - # Training mode / Service mode not needed for AF_PACKET - ###################################################### - ############### ROCKNSM Customization ################ - ###################################################### - - name: Set ROCK NSM Version - copy: - content: "{{ rock_version }}" - dest: /etc/rocknsm/rock-version - mode: 0644 - owner: root - group: root - - - name: Install ROCK NSM /etc/issue - copy: - src: etc-issue.in - dest: /etc/issue.in - mode: 0644 - owner: root - group: root - - - name: NetworkManager ROCK NSM hook - copy: - src: nm-issue-update - dest: /etc/NetworkManager/dispatcher.d/50-rocknsm-issue-update - mode: 0755 - owner: root - group: root - - ####################################################### - ##################### Handlers ###################### - ####################################################### - handlers: - - name: force sync time - command: > - chronyc -a 'burst 3/4'; sleep 5; chronyc -a makestep - - - name: configure monitor interfaces - shell: > - for intf in {{ rock_monifs | join(' ') }}; do - /sbin/ifup ${intf}; - done - - - name: sshd restart - service: name=sshd state=restarted - - - name: es maintenance - command: /usr/local/bin/es_cleanup.sh - - - name: reload bro - service: name=bro state="{{ 'started' if enable_bro else 'stopped' }}" +- hosts: zookeeper + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: zookeeper + when: with_zookeeper | bool - - name: create kafka bro topic - command: > - /opt/kafka/bin/kafka-topics.sh - --zookeeper 127.0.0.1:2181 - --create - --replication-factor 1 - --topic bro-raw - --partitions 1 +- hosts: kafka + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: kafka + when: with_kafka | bool - - name: create kafka suricata topic - command: > - /opt/kafka/bin/kafka-topics.sh - --zookeeper 127.0.0.1:2181 - --create - --replication-factor 1 - --topic suricata-raw - --partitions 1 +- hosts: stenographer + vars_files: + - /etc/rocknsm/config.yml + roles: + - 
role: stenographer + when: with_stenographer | bool + stenographer_monitor_interfaces: "{{ rock_monifs }}" - - name: create kafka fsf topic - command: > - /opt/kafka/bin/kafka-topics.sh - --zookeeper 127.0.0.1:2181 - --create - --replication-factor 1 - --topic fsf-raw - --partitions 1 +- hosts: bro + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: bro + when: with_bro | bool - - name: reload systemd - systemd: - daemon_reload: yes +- hosts: suricata + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: suricata + when: with_suricata | bool - - name: Restart Logstash - systemd: - name: logstash - state: restarted +- hosts: fsf + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: fsf + when: with_fsf | bool + +- hosts: + - docket + - kibana + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: lighttpd + when: with_lighttpd | bool + +- hosts: + - docket + - stenographer + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: docket + when: with_docket | bool + docket_enable: "{{enable_docket | bool}}" - - name: Restart Filebeat - systemd: - name: filebeat - state: restarted +- hosts: elasticsearch + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: elasticsearch + when: with_elasticsearch | bool - - name: Enable and Restart lighttpd - systemd: - name: lighttpd - state: "{{ 'restarted' if enable_lighttpd else 'stopped' }}" - enabled: "{{ enable_lighttpd }}" - when: with_lighttpd +- hosts: logstash + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: logstash + when: with_logstash | bool - environment: - http_proxy: "{{ http_proxy }}" - https_proxy: "{{ https_proxy }}" - HTTP_PROXY: "{{ http_proxy }}" - HTTPS_PROXY: "{{ https_proxy }}" +- hosts: kibana + vars_files: + - /etc/rocknsm/config.yml + roles: + - role: kibana + when: with_kibana | bool diff --git a/playbooks/files/es-bro-mappings.json b/playbooks/files/es-bro-mappings.json deleted file mode 100644 index d7aa58cb4..000000000 --- 
a/playbooks/files/es-bro-mappings.json +++ /dev/null @@ -1,367 +0,0 @@ -{ - "order": 20, - "template": "bro-*", - "settings": { - "analysis": { - "analyzer": { - "on_dots": { - "filter": "lowercase", - "type": "custom", - "tokenizer": "dots_tokenizer" - } - }, - "tokenizer": { - "dots_tokenizer": { - "type": "pattern", - "pattern": "\\." - } - } - } - }, - "mappings": { - "_doc": { - "dynamic_templates": [ - { - "ip_addresses": { - "match_mapping_type": "string", - "match": "id_*_h", - "mapping": { - "type": "ip" - } - } - }, - { - "record_uids": { - "match_mapping_type": "string", - "match": "uid", - "mapping": { - "type": "keyword" - } - } - }, - { - "strings_as_keywords": { - "match_mapping_type": "string", - "match": "*", - "mapping": { - "type": "keyword" - } - } - } - ], - "properties": { - "@version": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "notice": { - "properties": { - "dst": { - "type": "ip" - }, - "id_orig_h": { - "type": "ip" - }, - "id_orig_p": { - "type": "long" - }, - "id_resp_h": { - "type": "ip" - }, - "id_resp_p": { - "type": "long" - }, - "src": { - "type": "ip" - }, - "msg": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "files": { - "properties": { - "rx_hosts": { - "type": "ip" - }, - "tx_hosts": { - "type": "ip" - } - } - }, - "@meta": { - "properties": { - "geoip_orig": { - "dynamic": true, - "properties": { - "asn": { - "type": "keyword", - "norms": false - }, - "as_org": { - "type": "keyword", - "norms": false - }, - "autonomous_system": { - "type": "keyword", - "norms": false - }, - "city_name": { - "type": "keyword", - "norms": false - }, - "continent_code": { - "type": "keyword", - "norms": false - }, - "country_code2": { - "type": "keyword", - "norms": false - }, - "country_code3": { - "type": "keyword", - "norms": false - }, - "country_name": { - "type": "keyword", - "norms": false - }, - "dma_code": 
{ - "type": "integer" - }, - "ip": { - "type": "ip" - }, - "latitude": { - "type": "float" - }, - "location": { - "type": "geo_point" - }, - "longitude": { - "type": "float" - }, - "postal_code": { - "type": "keyword", - "norms": false - }, - "region_code": { - "type": "keyword", - "norms": false - }, - "region_name": { - "type": "keyword", - "norms": false - }, - "timezone": { - "type": "keyword", - "norms": false - } - } - }, - "geoip_resp": { - "dynamic": true, - "properties": { - "asn": { - "type": "keyword", - "norms": false - }, - "as_org": { - "type": "keyword", - "norms": false - }, - "autonomous_system": { - "type": "keyword", - "norms": false - }, - "city_name": { - "type": "keyword", - "norms": false - }, - "continent_code": { - "type": "keyword", - "norms": false - }, - "country_code2": { - "type": "keyword", - "norms": false - }, - "country_code3": { - "type": "keyword", - "norms": false - }, - "country_name": { - "type": "keyword", - "norms": false - }, - "dma_code": { - "type": "integer" - }, - "ip": { - "type": "ip" - }, - "latitude": { - "type": "float" - }, - "location": { - "type": "geo_point" - }, - "longitude": { - "type": "float" - }, - "postal_code": { - "type": "keyword", - "norms": false - }, - "region_code": { - "type": "keyword", - "norms": false - }, - "region_name": { - "type": "keyword", - "norms": false - }, - "timezone": { - "type": "keyword", - "norms": false - } - } - }, - "orig_host": { - "type": "ip" - }, - "resp_host": { - "type": "ip" - }, - "orig_port": { - "type": "long" - }, - "resp_port": { - "type": "long" - }, - "hosts": { - "type": "ip" - } - } - }, - "dhcp": { - "properties": { - "assigned_ip": { - "type": "ip" - } - } - }, - "domain": { - "dynamic": "true", - "properties": { - "name": { - "type": "text", - "analyzer": "on_dots", - "fields": { - "raw": { - "type": "keyword" - } - } - } - } - }, - "dns": { - "properties": { - "query": { - "type": "keyword", - "copy_to": "domain.name" - } - } - }, - "http": { - 
"properties": { - "user_agent": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "host": { - "type": "keyword", - "copy_to": "domain.name" - } - } - }, - "smtp": { - "properties": { - "user_agent": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "ssl": { - "properties": { - "server_name": { - "type": "keyword", - "copy_to": "domain.name" - } - } - }, - "syslog": { - "properties": { - "message": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "sip": { - "properties": { - "user_agent": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "software": { - "properties": { - "unparsed_version": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - } - } - } - } -} diff --git a/playbooks/files/es-parse-failures-mappings.json b/playbooks/files/es-parse-failures-mappings.json deleted file mode 100644 index 04b105313..000000000 --- a/playbooks/files/es-parse-failures-mappings.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "order": 99, - "template": "parse-failures-*", - "version": 1, - "settings": { - "index.mapping.ignore_malformed": true, - "index.mapping.total_fields.limit": 500, - "index.mapping.coerce": false, - "refresh_interval": "45s", - "number_of_replicas": "0" - }, - "mappings": { - "_default_": { - "dynamic_templates": [ - { - "all_fields": { - "match_mapping_type": "*", - "mapping": { - "type": "keyword", - "include_in_all": false, - "ignore_above": 256, - "index": true, - "norms": false, - "analyzer": "keyword" - } - } - } - ], - "_all": { - "enabled": false - }, - "properties": { - "@timestamp": { - "type": "date" - } - } - } - } -} diff --git a/playbooks/files/logstash-500-filter-bro.conf b/playbooks/files/logstash-500-filter-bro.conf deleted file mode 100644 
index bdd9b51eb..000000000 --- a/playbooks/files/logstash-500-filter-bro.conf +++ /dev/null @@ -1,133 +0,0 @@ -filter { - if [@metadata][stage] == "broraw_kafka" { - # Set the timestamp - date { match => [ "ts", "ISO8601" ] } - - # move metadata to new field - mutate { - rename => { - "@stream" => "[@meta][stream]" - "@system" => "[@meta][system]" - "@proc" => "[@meta][proc]" - } - } - - # Rename ID field from file analyzer logs - if [@meta][stream] in ["pe", "x509", "files"] { - mutate { rename => { "id" => "fuid" } } - mutate { - add_field => { "[@meta][event_type]" => "file" } - add_field => { "[@meta][id]" => "%{fuid}" } - } - } else if [@meta][stream] in ["intel", "notice", "notice_alarm", "signatures", "traceroute"] { - mutate { add_field => { "[@meta][event_type]" => "detection" } } - - if [id_orig_h] { - mutate { - convert => { - "id_orig_p" => "integer" - "id_resp_p" => "integer" - } - add_field => { - "[@meta][id]" => "%{uid}" - "[@meta][orig_host]" => "%{id_orig_h}" - "[@meta][orig_port]" => "%{id_orig_p}" - "[@meta][resp_host]" => "%{id_resp_h}" - "[@meta][resp_port]" => "%{id_resp_p}" - } - } - geoip { - source => "id_orig_h" - target => "[@meta][geoip_orig]" - } - geoip { - source => "id_resp_h" - target => "[@meta][geoip_resp]" - } - } - } else if [@meta][stream] in [ "capture_loss", "cluster", "communication", "loaded_scripts", "packet_filter", "prof", "reporter", "stats", "stderr", "stdout" ] { - mutate { add_field => { "[@meta][event_type]" => "diagnostic" } } - } else if [@meta][stream] in ["netcontrol", "netcontrol_drop", "netcontrol_shunt", "netcontrol_catch_release", "openflow"] { - mutate { add_field => { "[@meta][event_type]" => "netcontrol" } } - } else if [@meta][stream] in ["known_certs", "known_devices", "known_hosts", "known_modbus", "known_services", "software"] { - mutate { add_field => { "[@meta][event_type]" => "observations" } } - } else if [@meta][stream] in ["barnyard2", "dpd", "unified2", "weird"] { - mutate { add_field => { 
"[@meta][event_type]" => "miscellaneous" } } - } else { - - # Network type - mutate { - convert => { - "id_orig_p" => "integer" - "id_resp_p" => "integer" - } - add_field => { - "[@meta][event_type]" => "network" - "[@meta][id]" => "%{uid}" - "[@meta][orig_host]" => "%{id_orig_h}" - "[@meta][orig_port]" => "%{id_orig_p}" - "[@meta][resp_host]" => "%{id_resp_h}" - "[@meta][resp_port]" => "%{id_resp_p}" - } - } - geoip { - source => "id_orig_h" - target => "[@meta][geoip_orig]" - } - geoip { - source => "id_resp_h" - target => "[@meta][geoip_resp]" - } - } - - # Tie related records - mutate { add_field => { "[@meta][related_ids]" => [] }} - if [uid] { - mutate { merge => {"[@meta][related_ids]" => "uid" }} - } - if [fuid] { - mutate { merge => {"[@meta][related_ids]" => "fuid" }} - } - if [related_fuids] { - mutate { merge => { "[@meta][related_ids]" => "related_fuids" }} - } - if [orig_fuids] { - mutate { merge => { "[@meta][related_ids]" => "orig_fuids" }} - } - if [resp_fuids] { - mutate { merge => { "[@meta][related_ids]" => "resp_fuids" }} - } - if [conn_uids] { - mutate { merge => { "[@meta][related_ids]" => "conn_uids" }} - } - if [cert_chain_fuids] { - mutate { merge => { "[@meta][related_ids]" => "cert_chain_fuids" }} - } - - # Nest the entire document - ruby { - code => " - require 'logstash/event' - - logtype = event.get('[@meta][stream]') - ev_hash = event.to_hash - meta_hash = ev_hash['@meta'] - timestamp = ev_hash['@timestamp'] - - # Cleanup duplicate info - #meta_hash.delete('stream') - ev_hash.delete('@meta') - ev_hash.delete('@timestamp') - ev_hash.delete('tags') - - result = { - logtype => ev_hash, - '@meta' => meta_hash, - '@timestamp' => timestamp - } - event.initialize( result ) - " - } - mutate { add_field => {"[@metadata][stage]" => "broraw_kafka" } } - } -} diff --git a/playbooks/files/logstash-500-filter-fsf.conf b/playbooks/files/logstash-500-filter-fsf.conf deleted file mode 100644 index 086900f40..000000000 --- 
a/playbooks/files/logstash-500-filter-fsf.conf +++ /dev/null @@ -1,23 +0,0 @@ -filter { - if [@metadata][stage] == "fsfraw_kafka" { - if ![tags] { - # Remove kafka_topic field - mutate { remove_field => [ "kafka_topic" ] } - - # Set the timestamp - date { match => [ "Scan Time", "ISO8601" ] } - } - else { - mutate { add_field => { "[@metadata][stage]" => "_parsefailure" } } - } - } - - if [@metadata][stage] == "fsf" { - if ![tags] { - mutate { remove_field => ["path"] } - } - else { - mutate { add_field => { "[@metadata][stage]" => "_parsefailure" } } - } - } -} diff --git a/playbooks/files/logstash-500-filter-suricata.conf b/playbooks/files/logstash-500-filter-suricata.conf deleted file mode 100644 index 8f0e76e1a..000000000 --- a/playbooks/files/logstash-500-filter-suricata.conf +++ /dev/null @@ -1,28 +0,0 @@ -filter { - - if [@metadata][stage] == "suricataraw_kafka" { - - if ![tags] { - - # Remove kafka_topic field - mutate { - remove_field => [ "kafka_topic" ] - } - - # Set the timestamp - date { match => [ "timestamp", "ISO8601" ] } - } else { - mutate { add_field => { "[@metadata][stage]" => "_parsefailure" } } - } - } - - if [@metadata][stage] == "suricata_eve" { - # Tags will determine if there is some sort of parse failure - if ![tags] { - mutate { remove_field => ["path"] } - } - else { - mutate { add_field => { "[@metadata][stage]" => "_parsefailure" } } - } - } -} diff --git a/playbooks/files/logstash-998-filter-parsefailures.conf b/playbooks/files/logstash-998-filter-parsefailures.conf deleted file mode 100644 index d7186cdf5..000000000 --- a/playbooks/files/logstash-998-filter-parsefailures.conf +++ /dev/null @@ -1,13 +0,0 @@ -filter { - # In case [tags] are being used for something else and or geoip lookup failures, then we do NOT want to just assume [tags] means something we don't want to index. 
- # However, we also do NOT want to perform the expensive multi or statement on everything that does not have [tags] - if [tags] { - if "_parsefailure" in [tags] or "_jsonparsefailure" in [tags] or "_grokparsefailure" in [tags] or "_dissectfailure" in [tags] or "_groktimeout" in [tags] or "_rubyexception" in [tags] or "_dateparsefailure" in [tags] or "_jdbcstreamingfailure" in [tags] or "_elasticsearch_lookup_failure" in [tags] or "_urldecodefailure" in [tags] or "_csvparsefailure" in [tags] or "_xmlparsefailure" in [tags] { - mutate { - update => { - "[@metadata][stage]" => "_parsefailure" - } - } - } - } -} diff --git a/playbooks/files/logstash-999-output-es-bro.conf b/playbooks/files/logstash-999-output-es-bro.conf deleted file mode 100644 index 44ea66946..000000000 --- a/playbooks/files/logstash-999-output-es-bro.conf +++ /dev/null @@ -1,16 +0,0 @@ -output { - if [@metadata][stage] == "broraw_kafka" { - kafka { - codec => json - topic_id => "bro-%{[@meta][event_type]}" - bootstrap_servers => "127.0.0.1:9092" - } - - elasticsearch { - hosts => ["127.0.0.1"] - index => "bro-%{[@meta][event_type]}-%{+YYYY.MM.dd}" - template => "/opt/rocknsm/rock/playbooks/files/es-bro-mappings.json" - document_type => "_doc" - } - } -} diff --git a/playbooks/files/logstash-999-output-es-fsf.conf b/playbooks/files/logstash-999-output-es-fsf.conf deleted file mode 100644 index 9b288cabf..000000000 --- a/playbooks/files/logstash-999-output-es-fsf.conf +++ /dev/null @@ -1,16 +0,0 @@ -output { - if [@metadata][stage] == "fsfraw_kafka" { - kafka { - codec => json - topic_id => "fsf-clean" - bootstrap_servers => "127.0.0.1:9092" - } - - elasticsearch { - hosts => ["127.0.0.1"] - index => "fsf-%{+YYYY.MM.dd}" - manage_template => false - document_type => "_doc" - } - } -} diff --git a/playbooks/files/logstash-999-output-es-parsefailures.conf b/playbooks/files/logstash-999-output-es-parsefailures.conf deleted file mode 100644 index 127851e57..000000000 --- 
a/playbooks/files/logstash-999-output-es-parsefailures.conf +++ /dev/null @@ -1,9 +0,0 @@ -output { - if [@metadata][stage] == "_parsefailure" { - elasticsearch { - hosts => ["127.0.0.1"] - index => "parse-failures-%{+YYYY.MM.dd}" - document_type => "_doc" - } - } -} diff --git a/playbooks/files/logstash-999-output-es-suricata.conf b/playbooks/files/logstash-999-output-es-suricata.conf deleted file mode 100644 index a88bae3a1..000000000 --- a/playbooks/files/logstash-999-output-es-suricata.conf +++ /dev/null @@ -1,16 +0,0 @@ -output { - if [@metadata][stage] == "suricataraw_kafka" { - kafka { - codec => json - topic_id => "suricata-clean" - bootstrap_servers => "127.0.0.1:9092" - } - - elasticsearch { - hosts => ["127.0.0.1"] - index => "suricata-%{+YYYY.MM.dd}" - manage_template => false - document_type => "_doc" - } - } -} diff --git a/playbooks/files/nginx.conf b/playbooks/files/nginx.conf deleted file mode 100644 index 3e87b366e..000000000 --- a/playbooks/files/nginx.conf +++ /dev/null @@ -1,37 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -user nginx; -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. -include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. 
- # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; -} diff --git a/playbooks/files/profile.d-kibanapw.sh b/playbooks/files/profile.d-kibanapw.sh deleted file mode 100644 index 1b04d47cb..000000000 --- a/playbooks/files/profile.d-kibanapw.sh +++ /dev/null @@ -1,8 +0,0 @@ -# Set passwords -function kibanapw() { if [ $# -lt 2 ]; then echo -e "Usage: kibanapw USER PASSWORD\nUsers will be added to /etc/nginx/htpasswd.users"; else egrep "^${1}:" /etc/nginx/htpasswd.users > /dev/null 2>&1; if [[ $? -eq 0 ]]; then sudo sed -i "/${1}\:/d" /etc/nginx/htpasswd.users; fi; printf "${1}:$(echo ${2} | openssl passwd -apr1 -stdin)\n" | sudo tee -a /etc/nginx/htpasswd.users > /dev/null 2>&1; fi; } - -# Enable Auth -function enable_kibana_auth() { sudo sed -i 's/#auth_basic/auth_basic/g' /etc/nginx/conf.d/rock.conf; } - -# Disable Auth -function disable_kibana_auth() { sudo sed -i 's/auth_basic/#auth_basic/g' /etc/nginx/conf.d/rock.conf; } diff --git a/playbooks/files/pulledpork-disablesid.conf b/playbooks/files/pulledpork-disablesid.conf deleted file mode 100644 index 7e2381aa3..000000000 --- a/playbooks/files/pulledpork-disablesid.conf +++ /dev/null @@ -1,38 +0,0 @@ -# example disablesid.conf V3.1 - -# Example of modifying state for individual rules -# 1:1034,1:9837,1:1270,1:3390,1:710,1:1249,3:13010 - -# Example of modifying state for rule ranges -# 1:220-1:3264,3:13010-3:13013 - -# Comments are allowed in this file, and can also be on the same line -# As the modify state syntax, as long as it is a trailing comment -# 1:1011 # I Disabled this rule because I could! - -# Example of modifying state for MS and cve rules, note the use of the : -# in cve. This will modify MS09-008, cve 2009-0233, bugtraq 21301, -# and all MS00 and all cve 2000 related sids! These support regular expression -# matching only after you have specified what you are looking for, i.e. 
-# MS00- or cve:, the first section CANNOT contain a regular -# expression (MS\d{2}-\d+) will NOT work, use the pcre: keyword (below) -# for this. -# MS09-008,cve:2009-0233,bugtraq:21301,MS00-\d+,cve:2000-\d+ - -# Example of using the pcre: keyword to modify rulestate. the pcre keyword -# allows for full use of regular expression syntax, you do not need to designate -# with / and all pcre searches are treated as case insensitive. For more information -# about regular expression syntax: http://www.regular-expressions.info/ -# The following example modifies state for all MS07 through MS10 -# pcre:MS(0[7-9]|10)-\d+ - -# Example of modifying state for specific categories entirely (see README.CATEGORIES) -# VRT-web-iis,ET-shellcode,ET-emergingthreats-smtp,Custom-shellcode,Custom-emergingthreats-smtp - -# Any of the above values can be on a single line or multiple lines, when -# on a single line they simply need to be separated by a , -# 1:9837,1:220-1:3264,3:13010-3:13013,pcre:MS(0[0-7])-\d+,MS09-008,cve:2009-0233 - -# The modifications in this file are for sample/example purposes only and -# should not actively be used, you need to modify this file to fit your -# environment. diff --git a/playbooks/files/rock_start b/playbooks/files/rock_start deleted file mode 100644 index f2ce8d2c0..000000000 --- a/playbooks/files/rock_start +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -function feature_enabled() { - if grep -qiE "^with_$1: (true|yes)" /etc/rocknsm/config.yml; then - grep -qiE "^enable_$1: (true|yes)" /etc/rocknsm/config.yml; - return $? - else - return 1 - fi -} - -if feature_enabled zookeeper; then - echo "Starting Zookeeper..." - systemctl start zookeeper - sleep 5 - systemctl status zookeeper | egrep "^\s*Active" -fi - -if feature_enabled elasticsearch; then - echo "Starting Elasticsearch..." - systemctl start elasticsearch - sleep 5 - systemctl status elasticsearch | egrep "^\s*Active" -fi - -if feature_enabled kafka; then - echo "Starting Kafka..." 
- systemctl start kafka - sleep 5 - systemctl status kafka | egrep "^\s*Active" -fi - -if feature_enabled logstash; then - echo "Starting Logstash..." - systemctl start logstash - sleep 5 - systemctl status logstash | egrep "^\s*Active" -fi - -if feature_enabled kibana; then - echo "Starting Kibana..." - systemctl start kibana - sleep 5 - systemctl status kibana | egrep "^\s*Active" -fi - -if feature_enabled suricata; then - echo "Starting Suricata..." - systemctl start suricata - sleep 5 - systemctl status suricata | egrep "^\s*Active" -fi - -if feature_enabled snort; then - echo "Starting Snort..." - systemctl start snortd - sleep 5 - systemctl status snortd | egrep "^\s*Active" -fi - -if feature_enabled bro; then - echo "Starting Bro..." - systemctl start bro - sleep 5 - systemctl status bro | egrep "^\s*Active" -fi - -if feature_enabled stenographer; then - echo "Starting Stenographer..." - systemctl start stenographer - sleep 5 - for item in $(ls /etc/stenographer/config* | awk -F. '/\./ { print $2 }') - do - systemctl status stenographer@${item} | egrep "^\s*Active" | cat <( echo -n " ${item}: ") - - done -fi - -if feature_enabled fsf; then - echo "Starting FSF..." - systemctl start fsf - sleep 5 - systemctl status fsf | egrep "^\s*Active" -fi - -exit 0 diff --git a/playbooks/files/rock_status b/playbooks/files/rock_status deleted file mode 100755 index c9d67c9f6..000000000 --- a/playbooks/files/rock_status +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/bats - -# TODO: Get this information from ansible and/or facter -export ZOOKEEPER_HOST=127.0.0.1 -export ZOOKEEPER_PORT=2181 -export MON_IFS=$(cat /etc/bro/node.cfg | grep interface | \ - sed 's/interface=//; s/.*:://' | sort | uniq | paste -sd " " -) - -function feature_enabled() { - if grep -qiE "^with_$1: (true|yes)" /etc/rocknsm/config.yml; then - grep -qiE "^enable_$1: (true|yes)" /etc/rocknsm/config.yml; - return $? 
- else - return 1 - fi -} - -#---------------------------------------------------------------------------- -# INTERFACE -#---------------------------------------------------------------------------- - -@test "Check each monitor interface is live" { - local timeout_sec=5 - local timeout_pkt=5 - local results=() - - # Timeout after 5 seconds or 5 packets - for interface in $MON_IFS; do - packets_1=$(cat /sys/class/net/${interface}/statistics/rx_packets) - sleep ${timeout_sec} - packets_2=$(cat /sys/class/net/${interface}/statistics/rx_packets) - packets=$(echo `expr ${packets_2} - ${packets_1}`) - echo "${interface} had ${packets} packets in ${timeout_sec} secs." - [ $packets -gt 0 ] - done -} - -@test "Check for interface errors" { - for interface in $MON_IFS; do - tx_errors=$(cat /sys/class/net/${interface}/statistics/tx_errors) - rx_errors=$(cat /sys/class/net/${interface}/statistics/rx_errors) - count=$(echo `expr ${tx_errors} + ${rx_errors}`) - echo "${interface} had ${count} error types." - echo ">> Check \`ethtool -S ${interface} | awk '/error/' \` << " - [ $count -eq 0 ] - done -} - -@test "Check monitor interface for tx packets" { - for interface in $MON_IFS; do - pkts=$(cat /sys/class/net/${interface}/statistics/tx_packets) - echo "WARNING: Monitor interface ${interface} has sent ${pkts} packets". - [ $pkts -eq 0 ] - done -} - -#---------------------------------------------------------------------------- -# BRO -#---------------------------------------------------------------------------- -if feature_enabled bro; then - -@test "Check that broctl is running" { - # This will fail if a worker is crashed as well - systemctl status bro -} - -@test "Check for bro-detected packet loss" { - local notice_log='/data/bro/logs/current/notice.log' - if [ ! -f "${notice_log}"]; then - skip "No notice.log to check for packet loss errors." 
- fi - - capture_cnt=$(cat ${notice_log} | grep 'CaptureLoss::Too_Much_Loss'|wc -l) - drop_cnt=$(cat ${notice_log} | grep 'PacketFilter::Dropped_Packets'|wc -l) - - if [ $capture_cnt > 0 ]; then - if [ $drop_cnt > 0 ]; then - echo "Sensor is dropping packets before Bro can process them." - [ $drop_cnt -eq 0 ] - else - echo "Packets are being dropped prior to sensor receiving them." - [ $capture_cnt -eq 0 ] - fi - fi -} - -fi -#---------------------------------------------------------------------------- -# SURICATA -#---------------------------------------------------------------------------- -if feature_enabled suricata; then - -@test "Check that suricata is running" { - systemctl status suricata -} - -fi -#---------------------------------------------------------------------------- -# SNORT -#---------------------------------------------------------------------------- -if feature_enabled snort; then - -@test "Check that snort is running" { - systemctl status snortd -} - -fi -#---------------------------------------------------------------------------- -## FSF -##---------------------------------------------------------------------------- -if feature_enabled fsf; then - -@test "Check that FSF is running" { - systemctl status fsf - -} - -fi - -#---------------------------------------------------------------------------- -# ZOOKEEPER -#---------------------------------------------------------------------------- -if feature_enabled zookeeper; then - -@test "Check that zookeeper is running" { - systemctl status zookeeper -} - -@test "Check that zookeeper is listening" { - ss -lnt | grep ${ZOOKEEPER_PORT} -} - -@test "Check that client can connect to zookeeper" { - echo "" | ncat ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} -} - -fi -#---------------------------------------------------------------------------- -# KAFKA -#---------------------------------------------------------------------------- -if feature_enabled kafka; then - -@test "Check that kafka is running" { - 
systemctl status kafka -} - -@test "Check that kafka is connected to zookeeper" { - kafka_pid=$(systemctl show -p MainPID kafka.service | cut -d= -f2) - echo "kafka_pid: ${kafka_pid}" - kafka_socket=$(sudo ss -ntp | grep "${kafka_pid}" | \ - grep "${ZOOKEEPER_PORT}" | awk '{ print $4 }'| sed 's/::ffff://g') - echo "Kafka socket: ${kafka_socket}" - kafka_conns=$(echo "cons" | ncat ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} | \ - grep "${kafka_socket}" ) - echo -e "Kafka Connections: \n ${kafka_conns}" - conn_count=$(echo ${kafka_conns} | wc -l ) - echo "Number of Kafka Conns: ${conn_count}" - # It's possible this might need to be -ge - [ ${conn_count} -eq 1 ] -} - -fi - -#---------------------------------------------------------------------------- -# LOGSTASH -#---------------------------------------------------------------------------- - -if feature_enabled logstash; then - -@test "Check that logstash is running" { - systemctl status logstash -} - -fi -#---------------------------------------------------------------------------- -# ELASTICSEARCH -#---------------------------------------------------------------------------- - -if feature_enabled elasticsearch; then - -@test "Check that elasticsearch is running" { - systemctl status elasticsearch -} - -@test "Check that elasticsearch is green" { - result=$(curl -s http://localhost:9200/_cluster/health | jq '.status') - [ "${result}" == "\"green\"" ] -} - -fi -#---------------------------------------------------------------------------- -# KIBANA -#---------------------------------------------------------------------------- - -if feature_enabled kibana; then - -@test "Check that kibana is running" { - systemctl status kibana -} - -fi diff --git a/playbooks/files/rock_stop b/playbooks/files/rock_stop deleted file mode 100644 index 30c7c74c7..000000000 --- a/playbooks/files/rock_stop +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -function feature_enabled() { - if grep -qiE "^with_$1: (true|yes)" /etc/rocknsm/config.yml; then - 
grep -qiE "^enable_$1: (true|yes)" /etc/rocknsm/config.yml; - return $? - else - return 1 - fi -} - -if feature_enabled fsf; then - echo "Stopping FSF..." - systemctl stop fsf -fi - -if feature_enabled stenographer; then - echo "Stopping Stenographer..." - systemctl stop stenographer -fi - -if feature_enabled suricata; then - echo "Stopping Suricata..." - systemctl stop suricata -fi - -if feature_enabled snort; then - echo "Stopping Snort..." - systemctl stop snortd -fi - -if feature_enabled bro; then - echo "Stopping Bro..." - systemctl stop bro -fi - -if feature_enabled logstash; then - echo "Stopping Logstash..." - systemctl stop logstash -fi - -if feature_enabled kibana; then - echo "Stopping Kibana..." - systemctl stop kibana -fi - -if feature_enabled elasticsearch; then - echo "Stopping Elasticsearch..." - systemctl stop elasticsearch -fi - -if feature_enabled kafka; then - echo "Stopping Kafka..." - systemctl stop kafka -fi - -if feature_enabled zookeeper; then - echo "Stopping Zookeeper..." - systemctl stop zookeeper -fi - -exit 0 diff --git a/playbooks/files/stenoread b/playbooks/files/stenoread deleted file mode 100644 index 22f2afa31..000000000 --- a/playbooks/files/stenoread +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -if [ "$#" -lt 1 ]; then - cat >&2 <&2 - -for IFACE in ${INTERFACES[@]} -do - export STENOGRAPHER_CONFIG="${STENOGRAPHER_CONFIG_BASE}${IFACE}" - "$STENOCURL" /query \ - -d "$STENOQUERY" \ - --silent \ - --max-time 890 \ - --show-error $HEADERS | - "$TCPDUMP" -r /dev/stdin -s 0 -w ${OUT}/${IFACE}.pcap &>/dev/null -done - -mergecap -w - ${OUT}/*.pcap | tcpdump -r /dev/stdin -s 0 "$@" -rm -rf ${OUT} - diff --git a/playbooks/generate-defaults.yml b/playbooks/generate-defaults.yml index e80d7d773..9a0318815 100644 --- a/playbooks/generate-defaults.yml +++ b/playbooks/generate-defaults.yml @@ -1,11 +1,6 @@ --- - hosts: all tasks: - - name: Apply override settings, if available - include_vars: /etc/rocknsm/config.yml - ignore_errors: true - failed_when: false - - name: Create config directory file: state: directory diff --git a/playbooks/group_vars/all.yml b/playbooks/group_vars/all.yml index d3c94cab0..03c411894 100644 --- a/playbooks/group_vars/all.yml +++ b/playbooks/group_vars/all.yml @@ -1,19 +1,26 @@ %YAML 1.1 --- -rock_version: 2.2.0 +http_proxy: "{{ lookup('env','http_proxy') }}" +https_proxy: "{{ lookup('env', 'https_proxy') }}" + +rock_version: 2.3.0 +elastic: + major_version: 6 + suffix: "x" +elastic_version: "{{ elastic.major_version }}.{{ elastic.suffix }}" rock_online_install: true rock_enable_testing: false rock_disable_offline_repo: false rock_sysctl_file: /etc/sysctl.d/10-ROCK.conf rock_data_dir: /data +rock_conf_dir: /etc/rocknsm rocknsm_dir: /opt/rocknsm rock_data_user: root rock_data_group: root rock_monifs: "{{ ansible_interfaces | difference(['lo', ansible_default_ipv4.interface | default('lo') ])| list }}" -rock_hostname: "{{ inventory_hostname_short }}" -rock_fqdn: "{{ inventory_hostname }}" rock_mgmt_nets: [ "0.0.0.0/0" ] rock_cache_dir: /srv/rocknsm/support +rock_debug: "{{ lookup('env', 'DEBUG') }}" #### Retention Configuration #### elastic_close_interval: 15 @@ -34,7 +41,6 @@ with_suricata_update: true with_logstash: true with_elasticsearch: 
true with_kibana: true -with_filebeat: true with_zookeeper: true with_kafka: true with_lighttpd: true @@ -51,17 +57,14 @@ enable_suricata_update: true enable_logstash: true enable_elasticsearch: true enable_kibana: true -enable_filebeat: true enable_zookeeper: true enable_kafka: true enable_lighttpd: true enable_fsf: true +enable_filebeat: true rocknsm_package_list: - - java-1.8.0-openjdk-headless - jq - - GeoIP - - GeoIP-update - tcpreplay - tcpdump - bats @@ -72,6 +75,10 @@ rocknsm_package_list: - tmux - nmap-ncat - logrotate + - python-pyOpenSSL + - firewalld + - chrony + - libselinux-python http_tls_crt: /etc/pki/tls/certs/http_tls_crt.pem http_tls_pub: /etc/pki/tls/certs/http_tls_pub.pem @@ -82,15 +89,17 @@ http_tls_dhparams: /etc/pki/tls/misc/http_tls_dhparams.pem docket_web_pemfile: "{{ http_tls_combined }}" docket_web_dhparams: "{{ http_tls_dhparams }}" +ansible_cache: "~/ansible_cache/" + epel_baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ epel_gpgurl: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 -elastic_baseurl: https://artifacts.elastic.co/packages/6.x/yum +elastic_baseurl: https://artifacts.elastic.co/packages/{{ elastic_version }}/yum elastic_gpgurl: https://artifacts.elastic.co/GPG-KEY-elasticsearch -rocknsm_baseurl: https://packagecloud.io/rocknsm/2_2/el/7/$basearch -rocknsm_srpm_baseurl: https://packagecloud.io/rocknsm/2_2/el/7/SRPMS +rocknsm_baseurl: https://packagecloud.io/rocknsm/2_3/el/7/$basearch +rocknsm_srpm_baseurl: https://packagecloud.io/rocknsm/2_3/el/7/SRPMS rocknsm_testing_baseurl: https://copr-be.cloud.fedoraproject.org/results/@rocknsm/testing/epel-7-$basearch/ -rocknsm_gpgurl: https://packagecloud.io/rocknsm/2_2/gpgkey +rocknsm_gpgurl: https://packagecloud.io/rocknsm/2_3/gpgkey rocknsm_local_baseurl: file:///srv/rocknsm rock_offline_gpgcheck: 0 bro_user: bro @@ -107,7 +116,7 @@ rock_dashboards_repo: https://github.com/rocknsm/rock-dashboards.git rock_dashboards_branch: master 
rock_dashboards_url: "https://github.com/rocknsm/rock-dashboards/archive/{{ rock_dashboards_branch }}.tar.gz" rock_dashboards_filename: "rock-dashboards_{{ rock_dashboards_branch | replace('/', '-') }}.tar.gz" -rock_dashboards_version: 2.1 +rock_dashboards_version: 2.3 rock_module_dir: "/opt/rocknsm/rock-dashboards-{{ rock_dashboards_branch }}" stenographer_user: stenographer stenographer_group: stenographer @@ -131,9 +140,12 @@ es_user: elasticsearch es_group: elasticsearch es_data_dir: "{{ rock_data_dir }}/elasticsearch" es_cluster_name: rocknsm -es_node_name: "{{ rock_hostname }}" +es_node_name: "{{ ansible_hostname }}" +es_network_host: "{{ '_site:ipv4_' if ( groups['elasticsearch'] | length ) > 1 else '_local:ipv4_' }}" +es_url: "http://{{ groups['elasticsearch'][0] if ( groups['elasticsearch'] | length ) > 1 else '127.0.0.1' }}:9200" +es_action_auto_create_index: true +es_min_master_nodes: "{{ 2 if ( groups['es_masters'] | length ) == 3 else 1 }}" es_mem: "{{ (ansible_memtotal_mb // 1024 // 2) if (ansible_memtotal_mb // 1024) < 64 else 31 }}" -es_url: "http://127.0.0.1:9200" es_log_dir: /var/log/elasticsearch es_memlock_override: | [Service] diff --git a/playbooks/inventory/.gitkeep b/playbooks/inventory/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/playbooks/inventory/all-in-one.ini b/playbooks/inventory/all-in-one.ini deleted file mode 100644 index 1f99bf8de..000000000 --- a/playbooks/inventory/all-in-one.ini +++ /dev/null @@ -1,40 +0,0 @@ -# file: all-in-one - -[rock] -simplerockbuild.simplerock.lan ansible_hostname=127.0.0.1 ansible_connection=local - -[sensors:children] -rock - -[dataplane:children] -rock - -[stenographer:children] -sensors - -[bro:children] -sensors - -[suricata:children] -sensors - -[fsf:children] -sensors - -[kibana:children] -dataplane - -[docket:children] -dataplane - -[elasticsearch:children] -dataplane - -[kafka:children] -dataplane - -[zookeeper:children] -dataplane - -[logstash:children] -dataplane 
diff --git a/playbooks/inventory/multi-host.ini b/playbooks/inventory/multi-host.ini deleted file mode 100644 index e69de29bb..000000000 diff --git a/playbooks/library/openssl_certificate.py b/playbooks/library/openssl_certificate.py deleted file mode 100644 index 1e5277aa4..000000000 --- a/playbooks/library/openssl_certificate.py +++ /dev/null @@ -1,904 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016-2017, Yanis Guenane -# (c) 2017, Markus Teufelberger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - -DOCUMENTATION = ''' ---- -module: openssl_certificate -author: - - Yanis Guenane (@Spredzy) - - Markus Teufelberger (@MarkusTeufelberger) -version_added: "2.4" -short_description: Generate and/or check OpenSSL certificates -description: - - "This module allows one to (re)generate OpenSSL certificates. It implements a notion - of provider (ie. C(selfsigned), C(acme), C(assertonly)) for your certificate. - The 'assertonly' provider is intended for use cases where one is only interested in - checking properties of a supplied certificate. - Many properties that can be specified in this module are for validation of an - existing or newly generated certificate. The proper place to specify them, if you - want to receive a certificate with these properties is a CSR (Certificate Signing Request). - It uses the pyOpenSSL python library to interact with OpenSSL." -requirements: - - python-pyOpenSSL >= 0.15 (if using C(selfsigned) or C(assertonly) provider) - - acme-tiny (if using the C(acme) provider) -options: - state: - default: "present" - choices: [ present, absent ] - description: - - Whether the certificate should exist or not, taking action if the state is different from what is stated. 
- - path: - required: true - description: - - Remote absolute path where the generated certificate file should be created or is already located. - - provider: - required: true - choices: [ 'selfsigned', 'local', 'assertonly', 'acme' ] - description: - - Name of the provider to use to generate/retrieve the OpenSSL certificate. - The C(assertonly) provider will not generate files and fail if the certificate file is missing. - - force: - default: False - type: bool - description: - - Generate the certificate, even if it already exists. - - csr_path: - description: - - Path to the Certificate Signing Request (CSR) used to generate this certificate. This is not required in C(assertonly) mode. - - cacert_path: - description: - - Path to the Certificate Authority certificate that will be used to sign - the CSR. This cert should match the key specified in C(private_key). - - privatekey_path: - description: - - Path to the private key to use when signing the certificate. - - privatekey_passphrase: - description: - - The passphrase for the I(privatekey_path). - - selfsigned_version: - default: 3 - description: - - Version of the C(selfsigned) certificate. Nowadays it should almost always be C(3). - version_added: "2.5" - - selfsigned_digest: - default: "sha256" - description: - - Digest algorithm to be used when self-signing the certificate - - selfsigned_not_before: - description: - - The timestamp at which the certificate starts being valid. The timestamp is formatted as an ASN.1 TIME. - If this value is not specified, certificate will start being valid from now. - aliases: [ selfsigned_notBefore ] - - selfsigned_not_after: - description: - - The timestamp at which the certificate stops being valid. The timestamp is formatted as an ASN.1 TIME. - If this value is not specified, certificate will stop being valid 10 years from now. 
- aliases: [ selfsigned_notAfter ] - - acme_accountkey_path: - description: - - Path to the accountkey for the C(acme) provider - - acme_challenge_path: - description: - - Path to the ACME challenge directory that is served on U(http://:80/.well-known/acme-challenge/) - - acme_chain: - default: True - description: - - Include the intermediate certificate to the generated certificate - version_added: "2.5" - - signature_algorithms: - description: - - list of algorithms that you would accept the certificate to be signed with - (e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']). - - issuer: - description: - - Key/value pairs that must be present in the issuer name field of the certificate. - If you need to specify more than one value with the same key, use a list as value. - - issuer_strict: - default: False - type: bool - description: - - If set to True, the I(issuer) field must contain only these values. - version_added: "2.5" - - subject: - description: - - Key/value pairs that must be present in the subject name field of the certificate. - If you need to specify more than one value with the same key, use a list as value. - - subject_strict: - default: False - type: bool - description: - - If set to True, the I(subject) field must contain only these values. - version_added: "2.5" - - has_expired: - default: False - type: bool - description: - - Checks if the certificate is expired/not expired at the time the module is executed. - - version: - description: - - Version of the certificate. Nowadays it should almost always be 3. - - valid_at: - description: - - The certificate must be valid at this point in time. The timestamp is formatted as an ASN.1 TIME. - - invalid_at: - description: - - The certificate must be invalid at this point in time. The timestamp is formatted as an ASN.1 TIME. - - not_before: - description: - - The certificate must start to become valid at this point in time. The timestamp is formatted as an ASN.1 TIME. 
- aliases: [ notBefore ] - - not_after: - description: - - The certificate must expire at this point in time. The timestamp is formatted as an ASN.1 TIME. - aliases: [ notAfter ] - - - valid_in: - description: - - The certificate must still be valid in I(valid_in) seconds from now. - - key_usage: - description: - - The I(key_usage) extension field must contain all these values. - aliases: [ keyUsage ] - - key_usage_strict: - default: False - type: bool - description: - - If set to True, the I(key_usage) extension field must contain only these values. - aliases: [ keyUsage_strict ] - - extended_key_usage: - description: - - The I(extended_key_usage) extension field must contain all these values. - aliases: [ extendedKeyUsage ] - - extended_key_usage_strict: - default: False - type: bool - description: - - If set to True, the I(extended_key_usage) extension field must contain only these values. - aliases: [ extendedKeyUsage_strict ] - - subject_alt_name: - description: - - The I(subject_alt_name) extension field must contain these values. - aliases: [ subjectAltName ] - - subject_alt_name_strict: - default: False - type: bool - description: - - If set to True, the I(subject_alt_name) extension field must contain only these values. - aliases: [ subjectAltName_strict ] -extends_documentation_fragment: files -notes: - - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern. - Date specified should be UTC. Minutes and seconds are mandatory. 
-''' - - -EXAMPLES = ''' -- name: Generate a Self Signed OpenSSL certificate - openssl_certificate: - path: /etc/ssl/crt/ansible.com.crt - privatekey_path: /etc/ssl/private/ansible.com.pem - csr_path: /etc/ssl/csr/ansible.com.csr - provider: selfsigned - -- name: Generate a Let's Encrypt Certificate - openssl_certificate: - path: /etc/ssl/crt/ansible.com.crt - csr_path: /etc/ssl/csr/ansible.com.csr - provider: acme - acme_accountkey_path: /etc/ssl/private/ansible.com.pem - acme_challenge_path: /etc/ssl/challenges/ansible.com/ - -- name: Force (re-)generate a new Let's Encrypt Certificate - openssl_certificate: - path: /etc/ssl/crt/ansible.com.crt - csr_path: /etc/ssl/csr/ansible.com.csr - provider: acme - acme_accountkey_path: /etc/ssl/private/ansible.com.pem - acme_challenge_path: /etc/ssl/challenges/ansible.com/ - force: True - -# Examples for some checks one could use the assertonly provider for: - -# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow: -- name: Check if a certificate is currently still valid, ignoring failures - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - has_expired: False - ignore_errors: True - register: validity_check - -- name: Run custom task(s) to get a new, valid certificate in case the initial check failed - command: superspecialSSL recreate /etc/ssl/crt/example.com.crt - when: validity_check.failed - -- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - has_expired: False - when: validity_check.failed - -# Some other checks that assertonly could be used for: -- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - issuer: - O: Let's Encrypt - 
has_expired: False - -- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA) - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - signature_algorithms: - - sha224WithRSAEncryption - - sha256WithRSAEncryption - - sha384WithRSAEncryption - - sha512WithRSAEncryption - - sha224WithECDSAEncryption - - sha256WithECDSAEncryption - - sha384WithECDSAEncryption - - sha512WithECDSAEncryption - -- name: Ensure that the existing certificate belongs to the specified private key - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - privatekey_path: /etc/ssl/private/example.com.pem - provider: assertonly - -- name: Ensure that the existing certificate is still valid at the winter solstice 2017 - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - valid_at: 20171221162800Z - -- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - valid_in: 1209600 - -- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - key_usage: - - digitalSignature - - keyEncipherment - key_usage_strict: true - -- name: Ensure that the existing certificate can be used for client authentication - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - extended_key_usage: - - clientAuth - -- name: Ensure that the existing certificate can only be used for client authentication and time stamping - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: assertonly - extended_key_usage: - - clientAuth - - 1.3.6.1.5.5.7.3.8 - extended_key_usage_strict: true - -- name: Ensure that the existing certificate has a certain domain in its subjectAltName - openssl_certificate: - path: /etc/ssl/crt/example.com.crt - provider: 
assertonly - subject_alt_name: - - www.example.com - - test.example.com -''' - - -RETURN = ''' -filename: - description: Path to the generated Certificate - returned: changed or success - type: string - sample: /etc/ssl/crt/www.ansible.com.crt -''' - - -from random import randint -import datetime -import os - -from ansible.module_utils import crypto as crypto_utils -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native, to_bytes - -try: - import OpenSSL - from OpenSSL import crypto -except ImportError: - pyopenssl_found = False -else: - pyopenssl_found = True - - -class CertificateError(crypto_utils.OpenSSLObjectError): - pass - - -class Certificate(crypto_utils.OpenSSLObject): - - def __init__(self, module): - super(Certificate, self).__init__( - module.params['path'], - module.params['state'], - module.params['force'], - module.check_mode - ) - - self.provider = module.params['provider'] - self.privatekey_path = module.params['privatekey_path'] - self.privatekey_passphrase = module.params['privatekey_passphrase'] - self.csr_path = module.params['csr_path'] - self.cert = None - self.privatekey = None - self.module = module - - def check(self, module, perms_required=True): - """Ensure the resource is in its desired state.""" - - state_and_perms = super(Certificate, self).check(module, perms_required) - - def _validate_privatekey(): - if self.privatekey_path: - ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD) - ctx.use_privatekey(self.privatekey) - ctx.use_certificate(self.cert) - try: - ctx.check_privatekey() - return True - except OpenSSL.SSL.Error: - return False - - if not state_and_perms: - return False - - self.cert = crypto_utils.load_certificate(self.path) - - if self.privatekey_path: - self.privatekey = crypto_utils.load_privatekey( - self.privatekey_path, - self.privatekey_passphrase - ) - return _validate_privatekey() - - return True - - -class SelfSignedCertificate(Certificate): - """Generate the 
self-signed certificate.""" - - def __init__(self, module): - super(SelfSignedCertificate, self).__init__(module) - self.notBefore = module.params['selfsigned_notBefore'] - self.notAfter = module.params['selfsigned_notAfter'] - self.digest = module.params['selfsigned_digest'] - self.version = module.params['selfsigned_version'] - self.csr = crypto_utils.load_certificate_request(self.csr_path) - self.privatekey = crypto_utils.load_privatekey( - self.privatekey_path, self.privatekey_passphrase - ) - if module.params['provider'] == 'localsigned': - self.cacert = crypto_utils.load_certificate( - module.params['cacert_path'] - ) - else: - self.cacert = None - - def generate(self, module): - - if not os.path.exists(self.privatekey_path): - raise CertificateError( - 'The private key %s does not exist' % self.privatekey_path - ) - - if not os.path.exists(self.csr_path): - raise CertificateError( - 'The certificate signing request file %s does not exist' % self.csr_path - ) - - if not self.check(module, perms_required=False) or self.force: - cert = crypto.X509() - cert.set_serial_number(randint(1000, 99999)) - if self.notBefore: - cert.set_notBefore(self.notBefore) - else: - cert.gmtime_adj_notBefore(0) - if self.notAfter: - cert.set_notAfter(self.notAfter) - else: - # If no NotAfter specified, expire in - # 10 years. 315360000 is 10 years in seconds. 
- cert.gmtime_adj_notAfter(315360000) - cert.set_subject(self.csr.get_subject()) - - if self.cacert is not None: - cert.set_issuer(self.cacert.get_subject()) - else: - cert.set_issuer(self.csr.get_subject()) - cert.set_version(self.version - 1) - cert.set_pubkey(self.csr.get_pubkey()) - cert.add_extensions(self.csr.get_extensions()) - - cert.sign(self.privatekey, self.digest) - self.cert = cert - - try: - with open(self.path, 'wb') as cert_file: - cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)) - except EnvironmentError as exc: - raise CertificateError(exc) - - self.changed = True - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - self.changed = True - - def dump(self): - - result = { - 'changed': self.changed, - 'filename': self.path, - 'privatekey': self.privatekey_path, - 'csr': self.csr_path, - 'notBefore': self.cert.get_notBefore(), - 'notAfter': self.cert.get_notAfter(), - 'serial_number': self.cert.get_serial_number(), - } - - return result - - -class AssertOnlyCertificate(Certificate): - """validate the supplied certificate.""" - - def __init__(self, module): - super(AssertOnlyCertificate, self).__init__(module) - self.signature_algorithms = module.params['signature_algorithms'] - if module.params['subject']: - self.subject = crypto_utils.parse_name_field(module.params['subject']) - else: - self.subject = [] - self.subject_strict = module.params['subject_strict'] - if module.params['issuer']: - self.issuer = crypto_utils.parse_name_field(module.params['issuer']) - else: - self.issuer = [] - self.issuer_strict = module.params['issuer_strict'] - self.has_expired = module.params['has_expired'] - self.version = module.params['version'] - self.keyUsage = module.params['keyUsage'] - self.keyUsage_strict = module.params['keyUsage_strict'] - self.extendedKeyUsage = module.params['extendedKeyUsage'] - self.extendedKeyUsage_strict = 
module.params['extendedKeyUsage_strict'] - self.subjectAltName = module.params['subjectAltName'] - self.subjectAltName_strict = module.params['subjectAltName_strict'] - self.notBefore = module.params['notBefore'] - self.notAfter = module.params['notAfter'] - self.valid_at = module.params['valid_at'] - self.invalid_at = module.params['invalid_at'] - self.valid_in = module.params['valid_in'] - self.message = [] - self._sanitize_inputs() - - def _sanitize_inputs(self): - """Ensure inputs are properly sanitized before comparison.""" - - for param in ['signature_algorithms', 'keyUsage', 'extendedKeyUsage', - 'subjectAltName', 'subject', 'issuer', 'notBefore', - 'notAfter', 'valid_at', 'invalid_at']: - - attr = getattr(self, param) - if isinstance(attr, list) and attr: - if isinstance(attr[0], str): - setattr(self, param, [to_bytes(item) for item in attr]) - elif isinstance(attr[0], tuple): - setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr]) - elif isinstance(attr, tuple): - setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items())) - elif isinstance(attr, dict): - setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items())) - elif isinstance(attr, str): - setattr(self, param, to_bytes(attr)) - - def assertonly(self): - - self.cert = crypto_utils.load_certificate(self.path) - - def _validate_signature_algorithms(): - if self.signature_algorithms: - if self.cert.get_signature_algorithm() not in self.signature_algorithms: - self.message.append( - 'Invalid signature algorithm (got %s, expected one of %s)' % (self.cert.get_signature_algorithm(), self.signature_algorithms) - ) - - def _validate_subject(): - if self.subject: - expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject] - cert_subject = self.cert.get_subject().get_components() - current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject] - if (not self.subject_strict and not 
all(x in current_subject for x in expected_subject)) or \ - (self.subject_strict and not set(expected_subject) == set(current_subject)): - self.message.append( - 'Invalid subject component (got %s, expected all of %s to be present)' % (cert_subject, self.subject) - ) - - def _validate_issuer(): - if self.issuer: - expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer] - cert_issuer = self.cert.get_issuer().get_components() - current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer] - if (not self.issuer_strict and not all(x in current_issuer for x in expected_issuer)) or \ - (self.issuer_strict and not set(expected_issuer) == set(current_issuer)): - self.message.append( - 'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer) - ) - - def _validate_has_expired(): - if self.has_expired: - if self.has_expired != self.cert.has_expired(): - self.message.append( - 'Certificate expiration check failed (certificate expiration is %s, expected %s)' % (self.cert.has_expired(), self.has_expired) - ) - - def _validate_version(): - if self.version: - # Version numbers in certs are off by one: - # v1: 0, v2: 1, v3: 2 ... 
- if self.version != self.cert.get_version() + 1: - self.message.append( - 'Invalid certificate version number (got %s, expected %s)' % (self.cert.get_version() + 1, self.version) - ) - - def _validate_keyUsage(): - if self.keyUsage: - for extension_idx in range(0, self.cert.get_extension_count()): - extension = self.cert.get_extension(extension_idx) - if extension.get_short_name() == b'keyUsage': - keyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.keyUsage] - current_ku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in - to_bytes(extension, errors='surrogate_or_strict').split(b',')] - if (not self.keyUsage_strict and not all(x in current_ku for x in keyUsage)) or \ - (self.keyUsage_strict and not set(keyUsage) == set(current_ku)): - self.message.append( - 'Invalid keyUsage component (got %s, expected all of %s to be present)' % (str(extension).split(', '), self.keyUsage) - ) - - def _validate_extendedKeyUsage(): - if self.extendedKeyUsage: - for extension_idx in range(0, self.cert.get_extension_count()): - extension = self.cert.get_extension(extension_idx) - if extension.get_short_name() == b'extendedKeyUsage': - extKeyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.extendedKeyUsage] - current_xku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in - to_bytes(extension, errors='surrogate_or_strict').split(b',')] - if (not self.extendedKeyUsage_strict and not all(x in current_xku for x in extKeyUsage)) or \ - (self.extendedKeyUsage_strict and not set(extKeyUsage) == set(current_xku)): - self.message.append( - 'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (str(extension).split(', '), - self.extendedKeyUsage) - ) - - def _validate_subjectAltName(): - if self.subjectAltName: - for extension_idx in range(0, self.cert.get_extension_count()): - extension = self.cert.get_extension(extension_idx) - if extension.get_short_name() == b'subjectAltName': - l_altnames = 
[altname.replace(b'IP Address', b'IP') for altname in - to_bytes(extension, errors='surrogate_or_strict').split(b', ')] - if (not self.subjectAltName_strict and not all(x in l_altnames for x in self.subjectAltName)) or \ - (self.subjectAltName_strict and not set(self.subjectAltName) == set(l_altnames)): - self.message.append( - 'Invalid subjectAltName component (got %s, expected all of %s to be present)' % (l_altnames, self.subjectAltName) - ) - - def _validate_notBefore(): - if self.notBefore: - if self.cert.get_notBefore() != self.notBefore: - self.message.append( - 'Invalid notBefore component (got %s, expected %s to be present)' % (self.cert.get_notBefore(), self.notBefore) - ) - - def _validate_notAfter(): - if self.notAfter: - if self.cert.get_notAfter() != self.notAfter: - self.message.append( - 'Invalid notAfter component (got %s, expected %s to be present)' % (self.cert.get_notAfter(), self.notAfter) - ) - - def _validate_valid_at(): - if self.valid_at: - if not (self.valid_at >= self.cert.get_notBefore() and self.valid_at <= self.cert.get_notAfter()): - self.message.append( - 'Certificate is not valid for the specified date (%s) - notBefore: %s - notAfter: %s' % (self.valid_at, - self.cert.get_notBefore(), - self.cert.get_notAfter()) - ) - - def _validate_invalid_at(): - if self.invalid_at: - if not (self.invalid_at <= self.cert.get_notBefore() or self.invalid_at >= self.cert.get_notAfter()): - self.message.append( - 'Certificate is not invalid for the specified date (%s) - notBefore: %s - notAfter: %s' % (self.invalid_at, - self.cert.get_notBefore(), - self.cert.get_notAfter()) - ) - - def _validate_valid_in(): - if self.valid_in: - valid_in_date = datetime.datetime.utcnow() + datetime.timedelta(seconds=self.valid_in) - valid_in_date = valid_in_date.strftime('%Y%m%d%H%M%SZ') - if not (valid_in_date >= self.cert.get_notBefore() and valid_in_date <= self.cert.get_notAfter()): - self.message.append( - 'Certificate is not valid in %s seconds from now (%s) - 
notBefore: %s - notAfter: %s' % (self.valid_in, - valid_in_date, - self.cert.get_notBefore(), - self.cert.get_notAfter()) - ) - - for validation in ['signature_algorithms', 'subject', 'issuer', - 'has_expired', 'version', 'keyUsage', - 'extendedKeyUsage', 'subjectAltName', - 'notBefore', 'notAfter', 'valid_at', - 'invalid_at', 'valid_in']: - f_name = locals()['_validate_%s' % validation] - f_name() - - def generate(self, module): - """Don't generate anything - assertonly""" - - self.assertonly() - - if self.privatekey_path and \ - not super(AssertOnlyCertificate, self).check(module, perms_required=False): - self.message.append( - 'Certificate %s and private key %s does not match' % (self.path, self.privatekey_path) - ) - - if len(self.message): - module.fail_json(msg=' | '.join(self.message)) - - def check(self, module, perms_required=True): - """Ensure the resource is in its desired state.""" - - parent_check = super(AssertOnlyCertificate, self).check(module, perms_required) - self.assertonly() - assertonly_check = not len(self.message) - self.message = [] - - return parent_check and assertonly_check - - def dump(self): - - result = { - 'changed': self.changed, - 'filename': self.path, - 'privatekey': self.privatekey_path, - 'csr': self.csr_path, - } - - return result - - -class AcmeCertificate(Certificate): - """Retrieve a certificate using the ACME protocol.""" - - def __init__(self, module): - super(AcmeCertificate, self).__init__(module) - self.accountkey_path = module.params['acme_accountkey_path'] - self.challenge_path = module.params['acme_challenge_path'] - self.use_chain = module.params['acme_chain'] - - def generate(self, module): - - if not os.path.exists(self.privatekey_path): - raise CertificateError( - 'The private key %s does not exist' % self.privatekey_path - ) - - if not os.path.exists(self.csr_path): - raise CertificateError( - 'The certificate signing request file %s does not exist' % self.csr_path - ) - - if not 
os.path.exists(self.accountkey_path): - raise CertificateError( - 'The account key %s does not exist' % self.accountkey_path - ) - - if not os.path.exists(self.challenge_path): - raise CertificateError( - 'The challenge path %s does not exist' % self.challenge_path - ) - - if not self.check(module, perms_required=False) or self.force: - acme_tiny_path = self.module.get_bin_path('acme-tiny', required=True) - chain = '' - if self.use_chain: - chain = '--chain' - - try: - crt = module.run_command("%s %s --account-key %s --csr %s" - "--acme-dir %s" % (acme_tiny_path, chain, - self.accountkey_path, - self.csr_path, - self.challenge_path), - check_rc=True)[1] - with open(self.path, 'wb') as certfile: - certfile.write(to_bytes(crt)) - except OSError as exc: - raise CertificateError(exc) - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - self.changed = True - - def dump(self): - - result = { - 'changed': self.changed, - 'filename': self.path, - 'privatekey': self.privatekey_path, - 'accountkey': self.accountkey_path, - 'csr': self.csr_path, - } - - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - path=dict(type='path', required=True), - provider=dict(type='str', choices=['selfsigned', 'localsigned', 'assertonly', 'acme']), - force=dict(type='bool', default=False,), - csr_path=dict(type='path'), - - # General properties of a certificate - cacert_path=dict(type='path'), - privatekey_path=dict(type='path'), - privatekey_passphrase=dict(type='path', no_log=True), - signature_algorithms=dict(type='list'), - subject=dict(type='dict'), - subject_strict=dict(type='bool', default=False), - issuer=dict(type='dict'), - issuer_strict=dict(type='bool', default=False), - has_expired=dict(type='bool', default=False), - version=dict(type='int'), - keyUsage=dict(type='list', aliases=['key_usage']), - 
keyUsage_strict=dict(type='bool', default=False, aliases=['key_usage_strict']), - extendedKeyUsage=dict(type='list', aliases=['extended_key_usage'], ), - extendedKeyUsage_strict=dict(type='bool', default=False, aliases=['extended_key_usage_strict']), - subjectAltName=dict(type='list', aliases=['subject_alt_name']), - subjectAltName_strict=dict(type='bool', default=False, aliases=['subject_alt_name_strict']), - notBefore=dict(type='str', aliases=['not_before']), - notAfter=dict(type='str', aliases=['not_after']), - valid_at=dict(type='str'), - invalid_at=dict(type='str'), - valid_in=dict(type='int'), - - # provider: selfsigned - selfsigned_version=dict(type='int', default='3'), - selfsigned_digest=dict(type='str', default='sha256'), - selfsigned_notBefore=dict(type='str', aliases=['selfsigned_not_before']), - selfsigned_notAfter=dict(type='str', aliases=['selfsigned_not_after']), - - # provider: acme - acme_accountkey_path=dict(type='path'), - acme_challenge_path=dict(type='path'), - acme_chain=dict(type='bool', default=True), - ), - supports_check_mode=True, - add_file_common_args=True, - ) - - if not pyopenssl_found: - module.fail_json(msg='The python pyOpenSSL library is required') - if module.params['provider'] in ['selfsigned', 'assertonly']: - try: - getattr(crypto.X509Req, 'get_extensions') - except AttributeError: - module.fail_json(msg='You need to have PyOpenSSL>=0.15') - - base_dir = os.path.dirname(module.params['path']) - if not os.path.isdir(base_dir): - module.fail_json( - name=base_dir, - msg='The directory %s does not exist or the file is not a directory' % base_dir - ) - - provider = module.params['provider'] - - if provider == 'selfsigned' or provider == 'localsigned': - certificate = SelfSignedCertificate(module) - elif provider == 'acme': - certificate = AcmeCertificate(module) - else: - certificate = AssertOnlyCertificate(module) - - if module.params['state'] == 'present': - - if module.check_mode: - result = certificate.dump() - 
result['changed'] = module.params['force'] or not certificate.check(module) - module.exit_json(**result) - - try: - certificate.generate(module) - except CertificateError as exc: - module.fail_json(msg=to_native(exc)) - else: - - if module.check_mode: - result = certificate.dump() - result['changed'] = os.path.exists(module.params['path']) - module.exit_json(**result) - - try: - certificate.remove() - except CertificateError as exc: - module.fail_json(msg=to_native(exc)) - - result = certificate.dump() - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/playbooks/library/openssl_csr.py b/playbooks/library/openssl_csr.py deleted file mode 100644 index 7a354b811..000000000 --- a/playbooks/library/openssl_csr.py +++ /dev/null @@ -1,575 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2017, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - -DOCUMENTATION = ''' ---- -module: openssl_csr -author: "Yanis Guenane (@Spredzy)" -version_added: "2.4" -short_description: Generate OpenSSL Certificate Signing Request (CSR) -description: - - "This module allows one to (re)generate OpenSSL certificate signing requests. - It uses the pyOpenSSL python library to interact with openssl. This module supports - the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple - extensions." -requirements: - - "python-pyOpenSSL >= 0.15" -options: - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the certificate signing request should exist or not, taking action if the state is different from what is stated. 
- digest: - required: false - default: "sha256" - description: - - Digest used when signing the certificate signing request with the private key - privatekey_path: - required: true - description: - - Path to the privatekey to use when signing the certificate signing request - privatekey_passphrase: - required: false - description: - - The passphrase for the privatekey. - version: - required: false - default: 1 - description: - - Version of the certificate signing request - force: - required: false - default: False - choices: [ True, False ] - description: - - Should the certificate signing request be forced regenerated by this ansible module - path: - required: true - description: - - Name of the file into which the generated OpenSSL certificate signing request will be written - subject: - required: false - description: - - Key/value pairs that will be present in the subject name field of the certificate signing request. - - If you need to specify more than one value with the same key, use a list as value. 
- version_added: '2.5' - country_name: - required: false - aliases: [ 'C', 'countryName' ] - description: - - countryName field of the certificate signing request subject - state_or_province_name: - required: false - aliases: [ 'ST', 'stateOrProvinceName' ] - description: - - stateOrProvinceName field of the certificate signing request subject - locality_name: - required: false - aliases: [ 'L', 'localityName' ] - description: - - localityName field of the certificate signing request subject - organization_name: - required: false - aliases: [ 'O', 'organizationName' ] - description: - - organizationName field of the certificate signing request subject - organizational_unit_name: - required: false - aliases: [ 'OU', 'organizationalUnitName' ] - description: - - organizationalUnitName field of the certificate signing request subject - common_name: - required: false - aliases: [ 'CN', 'commonName' ] - description: - - commonName field of the certificate signing request subject - email_address: - required: false - aliases: [ 'E', 'emailAddress' ] - description: - - emailAddress field of the certificate signing request subject - subject_alt_name: - required: false - aliases: [ 'subjectAltName' ] - description: - - SAN extension to attach to the certificate signing request - - This can either be a 'comma separated string' or a YAML list. - - Values should be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName), - C(otherName) and the ones specific to your CA) - - More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6) - subject_alt_name_critical: - required: false - aliases: [ 'subjectAltName_critical' ] - description: - - Should the subjectAltName extension be considered as critical - key_usage: - required: false - aliases: [ 'keyUsage' ] - description: - - This defines the purpose (e.g. encipherment, signature, certificate signing) - of the key contained in the certificate. 
- - This can either be a 'comma separated string' or a YAML list. - key_usage_critical: - required: false - aliases: [ 'keyUsage_critical' ] - description: - - Should the keyUsage extension be considered as critical - extended_key_usage: - required: false - aliases: [ 'extKeyUsage', 'extendedKeyUsage' ] - description: - - Additional restrictions (e.g. client authentication, server authentication) - on the allowed purposes for which the public key may be used. - - This can either be a 'comma separated string' or a YAML list. - extended_key_usage_critical: - required: false - aliases: [ 'extKeyUsage_critical', 'extendedKeyUsage_critical' ] - description: - - Should the extkeyUsage extension be considered as critical - basic_constraints: - required: false - aliases: ['basicConstraints'] - description: - - Indicates basic constraints, such as if the certificate is a CA. - version_added: 2.5 - basic_constraints_critical: - required: false - aliases: [ 'basicConstraints_critical' ] - description: - - Should the basicConstraints extension be considered as critical - version_added: 2.5 - ocsp_must_staple: - required: false - aliases: ['ocspMustStaple'] - description: - - Indicates that the certificate should contain the OCSP Must Staple - extension (U(https://tools.ietf.org/html/rfc7633)). - version_added: 2.5 - ocsp_must_staple_critical: - required: false - aliases: [ 'ocspMustStaple_critical' ] - description: - - Should the OCSP Must Staple extension be considered as critical - - "Warning: according to the RFC, this extension should not be marked - as critical, as old clients not knowing about OCSP Must Staple - are required to reject such certificates - (see U(https://tools.ietf.org/html/rfc7633#section-4))." 
- version_added: 2.5 -extends_documentation_fragment: files - -notes: - - "If the certificate signing request already exists it will be checked whether subjectAltName, - keyUsage, extendedKeyUsage and basicConstraints only contain the requested values, whether - OCSP Must Staple is as requested, and if the request was signed by the given private key." -''' - - -EXAMPLES = ''' -# Generate an OpenSSL Certificate Signing Request -- openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - common_name: www.ansible.com - -# Generate an OpenSSL Certificate Signing Request with a -# passphrase protected private key -- openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - privatekey_passphrase: ansible - common_name: www.ansible.com - -# Generate an OpenSSL Certificate Signing Request with Subject information -- openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - country_name: FR - organization_name: Ansible - email_address: jdoe@ansible.com - common_name: www.ansible.com - -# Generate an OpenSSL Certificate Signing Request with subjectAltName extension -- openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com' - -# Force re-generate an OpenSSL Certificate Signing Request -- openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - force: True - common_name: www.ansible.com - -# Generate an OpenSSL Certificate Signing Request with special key usages -- openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - common_name: www.ansible.com - key_usage: - - digitalSignature - - keyAgreement - extended_key_usage: - - clientAuth - -# Generate an OpenSSL Certificate Signing Request with OCSP Must Staple -- 
openssl_csr: - path: /etc/ssl/csr/www.ansible.com.csr - privatekey_path: /etc/ssl/private/ansible.com.pem - common_name: www.ansible.com - ocsp_must_staple: true -''' - - -RETURN = ''' -privatekey: - description: Path to the TLS/SSL private key the CSR was generated for - returned: changed or success - type: string - sample: /etc/ssl/private/ansible.com.pem -filename: - description: Path to the generated Certificate Signing Request - returned: changed or success - type: string - sample: /etc/ssl/csr/www.ansible.com.csr -subject: - description: A list of the subject tuples attached to the CSR - returned: changed or success - type: list - sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]" -subjectAltName: - description: The alternative names this CSR is valid for - returned: changed or success - type: list - sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ] -keyUsage: - description: Purpose for which the public key may be used - returned: changed or success - type: list - sample: [ 'digitalSignature', 'keyAgreement' ] -extendedKeyUsage: - description: Additional restriction on the public key purposes - returned: changed or success - type: list - sample: [ 'clientAuth' ] -basicConstraints: - description: Indicates if the certificate belongs to a CA - returned: changed or success - type: list - sample: ['CA:TRUE', 'pathLenConstraint:0'] -ocsp_must_staple: - description: Indicates whether the certificate has the OCSP - Must Staple feature enabled - returned: changed or success - type: bool - sample: false -''' - -import os - -from ansible.module_utils import crypto as crypto_utils -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native, to_bytes - -try: - import OpenSSL - from OpenSSL import crypto -except ImportError: - pyopenssl_found = False -else: - pyopenssl_found = True - if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000: - # OpenSSL 1.1.0 or newer - MUST_STAPLE_NAME = b"tlsfeature" - MUST_STAPLE_VALUE = 
b"status_request" - else: - # OpenSSL 1.0.x or older - MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24" - MUST_STAPLE_VALUE = b"DER:30:03:02:01:05" - - -class CertificateSigningRequestError(crypto_utils.OpenSSLObjectError): - pass - - -class CertificateSigningRequest(crypto_utils.OpenSSLObject): - - def __init__(self, module): - super(CertificateSigningRequest, self).__init__( - module.params['path'], - module.params['state'], - module.params['force'], - module.check_mode - ) - self.digest = module.params['digest'] - self.privatekey_path = module.params['privatekey_path'] - self.privatekey_passphrase = module.params['privatekey_passphrase'] - self.version = module.params['version'] - self.subjectAltName = module.params['subjectAltName'] - self.subjectAltName_critical = module.params['subjectAltName_critical'] - self.keyUsage = module.params['keyUsage'] - self.keyUsage_critical = module.params['keyUsage_critical'] - self.extendedKeyUsage = module.params['extendedKeyUsage'] - self.extendedKeyUsage_critical = module.params['extendedKeyUsage_critical'] - self.basicConstraints = module.params['basicConstraints'] - self.basicConstraints_critical = module.params['basicConstraints_critical'] - self.ocspMustStaple = module.params['ocspMustStaple'] - self.ocspMustStaple_critical = module.params['ocspMustStaple_critical'] - self.request = None - self.privatekey = None - - self.subject = [ - ('C', module.params['countryName']), - ('ST', module.params['stateOrProvinceName']), - ('L', module.params['localityName']), - ('O', module.params['organizationName']), - ('OU', module.params['organizationalUnitName']), - ('CN', module.params['commonName']), - ('emailAddress', module.params['emailAddress']), - ] - - if module.params['subject']: - self.subject = self.subject + crypto_utils.parse_name_field(module.params['subject']) - self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]] - - def generate(self, module): - '''Generate the certificate signing request.''' - - if 
not self.check(module, perms_required=False) or self.force: - req = crypto.X509Req() - req.set_version(self.version - 1) - subject = req.get_subject() - for entry in self.subject: - if entry[1] is not None: - # Workaround for https://github.com/pyca/pyopenssl/issues/165 - nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0])) - OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0) - - extensions = [] - if self.subjectAltName: - altnames = ', '.join(self.subjectAltName) - extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii'))) - - if self.keyUsage: - usages = ', '.join(self.keyUsage) - extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii'))) - - if self.extendedKeyUsage: - usages = ', '.join(self.extendedKeyUsage) - extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii'))) - - if self.basicConstraints: - usages = ', '.join(self.basicConstraints) - extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii'))) - - if self.ocspMustStaple: - extensions.append(crypto.X509Extension(MUST_STAPLE_NAME, self.ocspMustStaple_critical, MUST_STAPLE_VALUE)) - - if extensions: - req.add_extensions(extensions) - - req.set_pubkey(self.privatekey) - req.sign(self.privatekey, self.digest) - self.request = req - - try: - csr_file = open(self.path, 'wb') - csr_file.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request)) - csr_file.close() - except (IOError, OSError) as exc: - raise CertificateSigningRequestError(exc) - - self.changed = True - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - self.changed = True - - def check(self, module, perms_required=True): - """Ensure the resource is in its desired 
state.""" - state_and_perms = super(CertificateSigningRequest, self).check(module, perms_required) - - self.privatekey = crypto_utils.load_privatekey(self.privatekey_path, self.privatekey_passphrase) - - def _check_subject(csr): - subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject] - current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()] - if not set(subject) == set(current_subject): - return False - - return True - - def _check_subjectAltName(extensions): - altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '') - altnames = [altname.strip() for altname in str(altnames_ext).split(',')] - # apperently openssl returns 'IP address' not 'IP' as specifier when converting the subjectAltName to string - # although it won't accept this specifier when generating the CSR. (https://github.com/openssl/openssl/issues/4004) - altnames = [name if not name.startswith('IP Address:') else "IP:" + name.split(':', 1)[1] for name in altnames] - if self.subjectAltName: - if set(altnames) != set(self.subjectAltName) or altnames_ext.get_critical() != self.subjectAltName_critical: - return False - else: - if altnames: - return False - - return True - - def _check_keyUsage_(extensions, extName, expected, critical): - usages_ext = [ext for ext in extensions if ext.get_short_name() == extName] - if (not usages_ext and expected) or (usages_ext and not expected): - return False - elif not usages_ext and not expected: - return True - else: - current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')] - expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected] - return set(current) == set(expected) and usages_ext[0].get_critical() == critical - - def _check_keyUsage(extensions): - return _check_keyUsage_(extensions, b'keyUsage', self.keyUsage, 
self.keyUsage_critical) - - def _check_extenededKeyUsage(extensions): - return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical) - - def _check_basicConstraints(extensions): - return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical) - - def _check_ocspMustStaple(extensions): - oms_ext = [ext for ext in extensions if ext.get_short_name() == MUST_STAPLE_NAME and str(ext) == MUST_STAPLE_VALUE] - if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000: - # Older versions of libssl don't know about OCSP Must Staple - oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05']) - if self.ocspMustStaple: - return len(oms_ext) > 0 and oms_ext[0].get_critical() == self.ocspMustStaple_critical - else: - return len(oms_ext) == 0 - - def _check_extensions(csr): - extensions = csr.get_extensions() - return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and - _check_extenededKeyUsage(extensions) and _check_basicConstraints(extensions) and - _check_ocspMustStaple(extensions)) - - def _check_signature(csr): - try: - return csr.verify(self.privatekey) - except crypto.Error: - return False - - if not state_and_perms: - return False - - csr = crypto_utils.load_certificate_request(self.path) - - return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr) - - def dump(self): - '''Serialize the object into a dictionary.''' - - result = { - 'privatekey': self.privatekey_path, - 'filename': self.path, - 'subject': self.subject, - 'subjectAltName': self.subjectAltName, - 'keyUsage': self.keyUsage, - 'extendedKeyUsage': self.extendedKeyUsage, - 'basicConstraints': self.basicConstraints, - 'ocspMustStaple': self.ocspMustStaple, - 'changed': self.changed - } - - return result - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', 
choices=['present', 'absent'], type='str'), - digest=dict(default='sha256', type='str'), - privatekey_path=dict(require=True, type='path'), - privatekey_passphrase=dict(type='str', no_log=True), - version=dict(default='1', type='int'), - force=dict(default=False, type='bool'), - path=dict(required=True, type='path'), - subject=dict(type='dict'), - countryName=dict(aliases=['C', 'country_name'], type='str'), - stateOrProvinceName=dict(aliases=['ST', 'state_or_province_name'], type='str'), - localityName=dict(aliases=['L', 'locality_name'], type='str'), - organizationName=dict(aliases=['O', 'organization_name'], type='str'), - organizationalUnitName=dict(aliases=['OU', 'organizational_unit_name'], type='str'), - commonName=dict(aliases=['CN', 'common_name'], type='str'), - emailAddress=dict(aliases=['E', 'email_address'], type='str'), - subjectAltName=dict(aliases=['subject_alt_name'], type='list'), - subjectAltName_critical=dict(aliases=['subject_alt_name_critical'], default=False, type='bool'), - keyUsage=dict(aliases=['key_usage'], type='list'), - keyUsage_critical=dict(aliases=['key_usage_critical'], default=False, type='bool'), - extendedKeyUsage=dict(aliases=['extKeyUsage', 'extended_key_usage'], type='list'), - extendedKeyUsage_critical=dict(aliases=['extKeyUsage_critical', 'extended_key_usage_critical'], default=False, type='bool'), - basicConstraints=dict(aliases=['basic_constraints'], type='list'), - basicConstraints_critical=dict(aliases=['basic_constraints_critical'], default=False, type='bool'), - ocspMustStaple=dict(aliases=['ocsp_must_staple'], default=False, type='bool'), - ocspMustStaple_critical=dict(aliases=['ocsp_must_staple_critical'], default=False, type='bool'), - ), - add_file_common_args=True, - supports_check_mode=True, - ) - - if not pyopenssl_found: - module.fail_json(msg='the python pyOpenSSL module is required') - - try: - getattr(crypto.X509Req, 'get_extensions') - except AttributeError: - module.fail_json(msg='You need to have 
PyOpenSSL>=0.15 to generate CSRs') - - base_dir = os.path.dirname(module.params['path']) - if not os.path.isdir(base_dir): - module.fail_json(name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir) - - csr = CertificateSigningRequest(module) - - if module.params['state'] == 'present': - - if module.check_mode: - result = csr.dump() - result['changed'] = module.params['force'] or not csr.check(module) - module.exit_json(**result) - - try: - csr.generate(module) - except (CertificateSigningRequestError, crypto_utils.OpenSSLObjectError) as exc: - module.fail_json(msg=to_native(exc)) - - else: - - if module.check_mode: - result = csr.dump() - result['changed'] = os.path.exists(module.params['path']) - module.exit_json(**result) - - try: - csr.remove() - except (CertificateSigningRequestError, crypto_utils.OpenSSLObjectError) as exc: - module.fail_json(msg=to_native(exc)) - - result = csr.dump() - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/playbooks/roles/.gitkeep b/playbooks/roles/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/playbooks/roles/bro/handlers/main.yml b/playbooks/roles/bro/handlers/main.yml deleted file mode 100644 index 384b29322..000000000 --- a/playbooks/roles/bro/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for bro \ No newline at end of file diff --git a/playbooks/roles/bro/tasks/main.yml b/playbooks/roles/bro/tasks/main.yml deleted file mode 100644 index 93a80eef5..000000000 --- a/playbooks/roles/bro/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# tasks file for bro \ No newline at end of file diff --git a/playbooks/roles/docket/meta/main.yml b/playbooks/roles/docket/meta/main.yml deleted file mode 100644 index 722379974..000000000 --- a/playbooks/roles/docket/meta/main.yml +++ /dev/null @@ -1,57 +0,0 @@ -galaxy_info: - author: your name - description: your description - company: your company (optional) - - # If the 
issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: http://example.com/issue/tracker - - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - - min_ansible_version: 1.2 - - # If this a Container Enabled role, provide the minimum Ansible Container version. - # min_ansible_container_version: - - # Optionally specify the branch Galaxy will use when accessing the GitHub - # repo for this role. During role install, if no tags are available, - # Galaxy will use this branch. During import Galaxy will access files on - # this branch. If Travis integration is configured, only notifications for this - # branch will be accepted. Otherwise, in all cases, the repo's default branch - # (usually master) will be used. - #github_branch: - - # - # platforms is a list of platforms, and each platform has a name and a list of versions. - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - - galaxy_tags: [] - # List tags for your role here, one per line. A tag is a keyword that describes - # and categorizes the role. Users find roles by searching for tags. Be sure to - # remove the '[]' above, if you add tags to this list. - # - # NOTE: A tag is limited to a single word comprised of alphanumeric characters. - # Maximum 20 tags per role. - -dependencies: [] - # List your role dependencies here, one per line. Be sure to remove the '[]' above, - # if you add dependencies to this list. 
\ No newline at end of file diff --git a/playbooks/roles/docket/tasks/install.yml b/playbooks/roles/docket/tasks/install.yml deleted file mode 100644 index eebbe788f..000000000 --- a/playbooks/roles/docket/tasks/install.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: docket | install rocknsm repo - yum_repository: - file: rocknsm - name: "{{ item.name }}" - enabled: yes - description: "{{ item.name }}" - baseurl: "{{ item.baseurl }}" - repo_gpgcheck: 1 - gpgcheck: "{{ item.gpgcheck }}" - gpgkey: - - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_1 - - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 - sslverify: 1 - sslcacert: /etc/pki/tls/certs/ca-bundle.crt - metadata_expire: 300 - state: present - with_items: - - { name: "rocknsm_2_1", gpgcheck: yes, baseurl: "https://packagecloud.io/rocknsm/2_1/el/7/$basearch" } - - { name: "rocknsm_2_1-source", gpgcheck: no, baseurl: "https://packagecloud.io/rocknsm/2_1/el/7/SRPMS" } - when: "{{ inventory_hostname in groups['docket'] and docket_install == 'yumrepo' }}" - -- name: docket | install packages - yum: - name: - - docket - - lighttpd - state: present - when: inventory_hostname in groups['docket'] diff --git a/playbooks/roles/kafka/handlers/main.yml b/playbooks/roles/kafka/handlers/main.yml deleted file mode 100644 index 1ae126b05..000000000 --- a/playbooks/roles/kafka/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for kafka \ No newline at end of file diff --git a/playbooks/roles/kafka/meta/main.yml b/playbooks/roles/kafka/meta/main.yml deleted file mode 100644 index 722379974..000000000 --- a/playbooks/roles/kafka/meta/main.yml +++ /dev/null @@ -1,57 +0,0 @@ -galaxy_info: - author: your name - description: your description - company: your company (optional) - - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: http://example.com/issue/tracker - - # Some suggested licenses: - # - BSD (default) - # - MIT - # - 
GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - - min_ansible_version: 1.2 - - # If this a Container Enabled role, provide the minimum Ansible Container version. - # min_ansible_container_version: - - # Optionally specify the branch Galaxy will use when accessing the GitHub - # repo for this role. During role install, if no tags are available, - # Galaxy will use this branch. During import Galaxy will access files on - # this branch. If Travis integration is configured, only notifications for this - # branch will be accepted. Otherwise, in all cases, the repo's default branch - # (usually master) will be used. - #github_branch: - - # - # platforms is a list of platforms, and each platform has a name and a list of versions. - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - - galaxy_tags: [] - # List tags for your role here, one per line. A tag is a keyword that describes - # and categorizes the role. Users find roles by searching for tags. Be sure to - # remove the '[]' above, if you add tags to this list. - # - # NOTE: A tag is limited to a single word comprised of alphanumeric characters. - # Maximum 20 tags per role. - -dependencies: [] - # List your role dependencies here, one per line. Be sure to remove the '[]' above, - # if you add dependencies to this list. 
\ No newline at end of file diff --git a/playbooks/roles/kafka/tasks/main.yml b/playbooks/roles/kafka/tasks/main.yml deleted file mode 100644 index 68c998ce8..000000000 --- a/playbooks/roles/kafka/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# tasks file for kafka \ No newline at end of file diff --git a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-2.1-Testing b/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-2.1-Testing deleted file mode 100644 index 4b937b439..000000000 --- a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-2.1-Testing +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQENBFlvv3sBCACmWFXdLI6OvV/vbfuq5r9rXoVQl3XP5e/AZFaqanheRJGixT5j -F6xbUvOwoej0RhJi1jSx7PXqtjAe30DGwlqK1pd07PupL2m3JizTPrNcEa/uODBg -AFpJDudxT5h0d0krZPKN6fsvIyI0PV6wipSzv7WGEr/BVeWfUIwqmY8b0fjc4wGg -CLoq9Jsbu1rVrUKhq4Wr9QIzugbazwWNvgBbbmQXWAE1LmmnZjc1CcBU/nLHPG9G -G3PD8FQaufnxNi3xqQd5C7NGI2eID05aV7W2EYh/NMs+X4JccEuvexv08uHwDUYh -bo7k6co3usaQJMuLVz4HtUq/IJcEDM6r++xbABEBAAG0SEByb2NrbnNtX3JvY2tu -c20tMi4xIChOb25lKSA8QHJvY2tuc20jcm9ja25zbS0yLjFAY29wci5mZWRvcmFo -b3N0ZWQub3JnPokBPQQTAQgAJwUCWW+/ewIbLwUJCWYBgAULCQgHAgYVCAkKCwIE -FgIDAQIeAQIXgAAKCRDeDJmNdJs4dRmtB/95rrE9qCy1IHhspyJI0t0VRW+/Bw1v -9kaOJTOfINNWAS1JJhyGTKloA7GEaaEpNFuOXqoyGpAGQCl5pfYmsjYKF1OR7t2H -zNA5YAeGADyJxDvUU9NdBwZLDXK7/vNQxcAQUQ5CiYm2zXj5rDnW2Rv4b45a59ax -4WAluDkYOKvaIl8Q4ouoGqDT5bB50az5Xd9nGsn/sXrkU67Vt33Py66V2UZRXN1d -HTq6EAYBoTrMa2rRGwgWFdaJ/TlI5qtTzj8fj6Nu/MH27q4nFKLx7RdgVjO/aQfa -xX3YBtLsqlIVEhNCMt7ujSJnRD7EAqCs9tspRo0rpuAcC2M5onk7ERrF -=z0ux ------END PGP PUBLIC KEY BLOCK----- diff --git a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_1 b/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_1 deleted file mode 100644 index 08a2481ae..000000000 --- a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_1 +++ /dev/null @@ -1,64 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.11 (GNU/Linux) - 
-mQINBFoBDssBEACxQLrKVKVPHyPymRFwIbK8U2ork6RAWjFFJhNr57OufQRkIaZs -wxD+KS6XzhmJWljGzNspM33Aey8B7msFXDT+4v8wLjo87d2pWsqqOky8dmJFAF5z -IpDSSj01WStMf5t3LCxGG6FGMapLT1nPQlNCtS+iNCl7lnpAvUAwhZwlzmS2ERz1 -7PS5FmvOblfOf3M8sATL5xTxjh7b7oPB3HnbEGZE4Oop1NuRMhUJK5WvX4vzciuU -chIPeo5hjR6C4Ijsvbd2GnGuCaq5Be9JNm+kuNyfiA1eljxuR+uiQ/IEH121WasT -MaMrxL0LhceIamxfHUE1LN0/cIBdS2UBnbedJBymNC36TCH3GLOI1SHyFZUOLkYI -uXccondFDNfsKBkRS50ZrIHA+gB/WqNOeDVJijyVamLpU3wYDShd+gBBunGiIRs2 -6nRkKfK0rg1jDL+lQhqjK2qLgQpjqU3eRRKKfhg2QpkQB8GkeRq7rWxOgmEB/6d9 -end2h7TpIaDY9poi218mbPrI4b6Q1ffRin9Q3RQ9XArif1ZCQgUhYFGyLOqAafp8 -2aJkHlFnEkymp8ea9BRt1IPukZk+JIM9ZLZukLg4Ee9Hq8LSfzpna8zebOn0qgm6 -NcKlNkmkMGPC3Y9pXqW1fDmj1b9UDuTMnIZaIT+ZKZ5F7Rj7XSA1y2gmUQARAQAB -tGhodHRwczovL3BhY2thZ2VjbG91ZC5pby9yb2NrbnNtLzJfMSAoaHR0cHM6Ly9w -YWNrYWdlY2xvdWQuaW8vZG9jcyNncGdfc2lnbmluZykgPHN1cHBvcnRAcGFja2Fn -ZWNsb3VkLmlvPokCOAQTAQIAIgUCWgEOywIbLwYLCQgHAwIGFQgCCQoLBBYCAwEC -HgECF4AACgkQ1ctZO68h4nmWkA/+M1VgBuFNoY8sSslrCq6U5W7Rf7TXpodyaGQZ -MfAn7o6uSk69SaIaFIMfK+IPjGNu7VnNOMgwBKxOqZBJ9u06ougzUZUgZt1VFNbC -LW58pw7DAUd7voIeGuXmMhpE63DQltlq0pKuJQ09dz8XIysAtIb+d/zzwJCne5G7 -YHAwZiFxB2YMGwaf8YR07ZwTDqpyehBGPIBInaE1j5ClACvV3OHav7umylQzvh2H -ScrX64N7pUOuaLq3ifcOLEjfzWK0BW491P1d7mjkid8fnyTWZo7KxLpjNjfhGw48 -2qUgAzIY+rALUzcDi4/dahLzgqsNO0W/cGiuDntozMaxEbXpjpeluP8g5qyo5bYe -EHJcQk5w7sQuUuZj1FWC4DHLk554BwjlqKrFLkdkK8CAKQSauIngfjeJ5iMvA78B -oYLTuxkW0iTV/nCXgp4tIoji3/tx5VaTrxveyi+huYgy5wT9nWRmCczBQjeE+DzJ -1tXPAE+Cy0YEboY8p8KGIPu7bj4O9noBFlIWTXpoWHPz3pRqkfakbCjxYnQLSwhq -m6ymhkgy+DTdVl9zKzInVKNniXXNKjGfhDQvPRBbDGyLw4BauOtVC6XJk61z5BeM -5wy8L0dfhPTS/mdptcYDkDeUSTv75OpmtzGNAJWUJhkJrMujThgNOhWvfr6Hr9YE -axKWvhW5Ag0EWgEOywEQAM8kU+9L60ltUSvmtO2KfpSus/rSAFHbecPu3TNaRVMe -jjQB7vOjbcZ+aK8SPU87FfBDKk3ZxVY/aAsjxWBmNOY2MzcHohUARiR74ScKlic3 -TNo9klHCPzTGpBaDx01k+j7/aQG+WmaG5SlqdGhsuoDg4M+NH/0UAjKGf95PzBh2 -OMlQxiNkuXKbqQioPH41d0O2dxZY9p88SC+mbCUye865DG9eQ3jkJ+nqKDOwRvIa -Vtt600wA9jqMON1h/2UVqdh7Ea/gpvasQqd3D0EeFX8bK+AXDXkTPAR61Dt3Zz8p 
-0VFyi7Ymic5v5OHdNh6iZHV/ziqceuFNXdqzxGbA5ORZtIcs+M2LVvwNH4SnKOp+ -ZWz+S8eWkJm2bxHc0G0S2ddghjdMKFawbS+dvOcXEGbTJvn8eRS9RyaGvQDziM3s -nhNtiFnkHZmgUCL0BpCOxQbXVwotH2hturLShxm7qr96JbUFmy53mrvvUnZX4oCf -HBfFMo3rEpCA6VfM4ubbhDoDmtdLVPj0UcjK7rsV1jQEzh6oAaI5hhy2qtDhsbiz -gmj9mTEJJlastF902bvGMq45poMl/aOo0Ku1bu/ZsZ/NtJVhQA4c5NBA7Qd4RofO -Di+FW0NbvRmgdihtBKhdr9hkCEEHmDmDEFMyrm4owyjKZoHuk4ghJ05Qqy7bQPwV -ABEBAAGJBD4EGAECAAkFAloBDssCGy4CKQkQ1ctZO68h4nnBXSAEGQECAAYFAloB -DssACgkQjZqZUXdJHC6n6w//eacWIRpdG0MAazMDXikRUdaEbBQgG8kXYfBbcHfi -3H41kQKCLgDP78rMmMqtk4Tm1UmGcR+wTjA5X3cfqDbY8P8W8ycJSW6xfT2KLaNk -YbnXSctTs1Fe7kMNV9KL+koCsgINlhLVSRLg0VLL4OhYwK6hwblYRv6K3wR6Y6GO -MS14RoBt2UQvXjpCP/YYy+SZcX6+tgPVTX/u7ldw7eM10ujRk1eL/L6IOkM2Q8CZ -LeOVNZNbEbRvop+v86U7O6q1ZFT0tkhwtleRt1HE/gX4JSQoKhBrrFzV62yya09E -2synkPJFHdreApFIlkDTA4E8WCwePhqgqlYoUX0/yXURhH0aKPox6F87J8znGrsA -q9q7if2CrRLjqIQ6YKEYiKoXmAfjqANmjmcQx88qyyce7N2rDMhJkh4xQd+xcdiC -igOUnXf6bumRAZC7NhsdxP7FUIPfuTAtP9KSGBDdVYP9ote33FL2uB2ho09pHeY7 -IHizvyyxHWvEz5F8rPxb4pcHer4JJmHcdzU4QyuxVsyoKEHTophMyX+Qj/NMQ4jy -Yb6lGBGTNlDZ/E9d6y8TTcWA9tutgv6I1rqRVeiJOzW9p5MJZujLi0KM0pqGe5FN -OXeNO5YD8Rum636keeN/L0tRh6q7O+h0JFRBnzLxQTFgOqe7Vz6uVvNXkXrKHbYA -R43YyQ/+OgZEoZeOW/FtnloSnbDFtapm7JFBvKXvrgtQGwf4Y2QwLBFg0Ww4l7SZ -d0h6wBE8E4umKZQAPrvL0HKuyUJ7g6PlI6/3aDSsV1jl3/G+z0l3GpwAAkx2SZjZ -5IA5ktQTlQ3M/MKFy2YvV0AiZlmK7qaZ8PA1h3RyJtj9itHAYOZAIzc1rlDDn7Ap -H8zsnBOsGHoMHTJE4VqSp1aKRbskCRBClYaWlEt8sGXcUEzdJEHTrp9eYXuRvmAz -qoQtLw7mpUPZuWvObG/FVOrEhjHMd392SZ7yi5n8z0mdz/+eJ6hNIvF0c4yzez6i -im4YKwIRBasrSB2g1ZTIESFYZCN6RHc2dzgV1kV/bxh3tvsaMHhe4pBV9yLidJqW -aHM263LbeneHgmDHmLTOesnwxMrOTgFCVcf4hNtYfJ6+IIrFycyvexjyvhs2TKzQ -Qlvd78BgXQNpkUMc5NK/6dVQDZTPEguLRGutpnB13lY6vpHsoj7iC/ktT+0lvcI+ -2Icxe53iwRx3K/Zf9RoC1FUuRBb1Ux9gf/s7x87uRtCg8KEWubMTP0DFE0RdxUff -3z+v0uq1Fe9vFqI3I35YMRcjaAj0192rX/aoGFzp33HS8a08V9Cs98iuY2ceVi2M -XaxyC0F7/8t8qyLImRUIS87C/C+2wY6zUDwoE11f9KZF20sHylU= -=lIij ------END PGP PUBLIC KEY BLOCK----- diff --git 
a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_2 b/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_2 deleted file mode 100644 index f1ca1cb0e..000000000 --- a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_2 +++ /dev/null @@ -1,64 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.11 (GNU/Linux) - -mQINBFvTa1cBEADANABD/sPpVNDQBriyHazOx2fe8V9eQLqZsR0Ht/LldeHprptg -SV0PJfXoY7K9oXyCOCV8PqikeIT5TC9TBkGvIQ/CCWXJCBK8WWHxiupewm/GBqj1 -M41aG4H7dQLrt1zkjtWVzLvCOD0HSFusT6jcZF84EDKvfuMdlsF5IZH/eGPiOFEg -j/CIwt9hGw3VFd7lEX52rln8eTVYXxNdonFOgCrYJdF1AV+pkk9ZREN4dPBWiMIC -GxMghgU9Iqe+9OnXoJUMbS5XjJmJ+2FSxbZZxawMfiqGIGH0DWmR3M8nsBZFUvpo -1VTh7li9S59/GQNgcknj7KLVd6DS/C1heMm68bNMhg65qsJQocTAIZyouTxqFDox -6NDLOmsumoSYvW9G3AWM0STeC+ulVhR7Y9tPHO0SCo2mM4CYypFt0P20D3ekEqCv -OdmkfiFwl/NnODhzP7m/EoTNkcc7RU4wqW2hWKOactuRlpXcwbYyQ+/nb61n1qPT -UZCQs9cxHsULAJ+pIzhfZmTAXyqB8nw1XquywoYNQX3812CHaYjzyfTnerU6PxF8 -ABSRf6Cja0JUzcRFrs3TpndHOfEZ0cwnwvj4Kozv15IrNvSAmMwwaDvBpeKFgpZF -tfPFzTzLbd/RWRjqNlE8GLurkdPZ9GlDp1NIBPd1tV43y8A6OzbMfCWWgwARAQAB -tGhodHRwczovL3BhY2thZ2VjbG91ZC5pby9yb2NrbnNtLzJfMiAoaHR0cHM6Ly9w -YWNrYWdlY2xvdWQuaW8vZG9jcyNncGdfc2lnbmluZykgPHN1cHBvcnRAcGFja2Fn -ZWNsb3VkLmlvPokCOAQTAQIAIgUCW9NrVwIbLwYLCQgHAwIGFQgCCQoLBBYCAwEC -HgECF4AACgkQhAQPtJsiGwm8KRAAn6W5VShxMO3EpSvH5q63NkCq6+S83F0IPxP4 -/sG7zK10xVnie0DfFNNbvplul8XnjvI73deZEOUmLx2Fu+YqJH6vuc5kBRVFv7Qe -UdMKW169SL4Kr4NJzFaNfpdEnqShhx+Hylq8Re5zpufrURKs4uY9h7eWbUMqw5ot -ux9nnj7BViuRgfcm0o/r7Pda9l8u+Mt2JM3qRhFmPbPrDcJ5oi9i1jh/6KAtmpJm -Dg/EpFfbnzm9PyDrPHS2DcqWOB9VzayzwzMB+vOadhiPATqakdtPYwazPOd8HRll -i/+pgxIpJVbsksMGGY7qTupYMGjf3oejUzL2DJaCb9vjta/yPhrxHXGJcc0FQCo1 -qsMwojGzL5kuJB6AonxV+t8PzjKDtHxD9p7+7MmiasfIDoR9xX/mSg1h9a3bUPFm -1TqV5RenBHHZI10eqZ7mBjZQuZzNpAIts7B33eOhn8nYKHir2Ko+UkXVKQ1xl0u3 -QhBwrAwPK7DAeclxPq+yykG7T1Rpz/lZKLtxGUA7VvAkz5NxzO/DHD4WQI3aA8EE -neLp9pf8IfP3+mYEafxQKt2T4cyTF1vGnnq2s4W2874H33dT24RPAg8P0ZnNIIfl 
-cOfCmjD/3BTSQ4WBSChdsIjwEKgBnt5poGg4agVQ8Bpm6OaeSe4XISVEHI6e8Zs9 -vrUDGW25Ag0EW9NrVwEQANWADwNWP3ZA4an7OJBpa3CtLBVDqpIviPK3lYLvl62W -LRR5Zqgn5l1LdWInmC1ADEg2jNcBO6PTUA5XHSPOOxuV5eimTHmjjcODcH35LLYF -niy3XTbKCqMDgOnDEFe7aPEZExczCii2v+mA2F24DZiMLvqimgakk3RcAHqZXs1a -5OeM5UGf3alns+Z4eBtWvTayumN6P9YSh0udMXJC2evoR5uh1s0uhrYdvW6FfHX4 -D8k4lziO87MnFtKcSQI7juDx2glpwxJq4y7HbXok1UjHE5qEFFt7dKAG0lcf/izC -Yr2XnYcdvTx4nQQ5U1yxy3p7+joxVe8WiJILVKVzSZWfhoOCGxtrm6yUSwzLElmj -o88d+hx92TRSn4MuVIsJO85rUoZhReXsf5MgcTPUUc09SkKWTpvCg3PsIFqVGSea -2MBTgHL2QYz8tzDEaW7zjmWC7f8WFGC941Mr+OGX2aWFf83b27abue7oQQOmuwO6 -nnjWSlHkMy22NGeyP5n+R7cmpFMJK9bxIJKyfv9T2c3p4d8Yr+5Eeb02sK+s3N+i -2edheQ59yw1rmrSK7dYrkrp0d65tqlxVEd54f/fP0iMpXqNusQ70puQv9eoKqG7r -thRkLd3sHRPsHkoKqpzMSxhcr0UzVXIrGifRc1M9MzDYmG1KJ/EvJ7xVTog+NTjN -ABEBAAGJBD4EGAECAAkFAlvTa1cCGy4CKQkQhAQPtJsiGwnBXSAEGQECAAYFAlvT -a1cACgkQwbsgaL870i+lKQ//YcJgvUL+AugM/lXufVG9MUDRFywjBzBeTf0QeeK+ -U6h6NwpsXMJGiG4NVHPK9hCSWOiJ2k6wjvgXfgiiA5Yq7DM5F9JmyqZKFSw/jSgc -o8+CnrvPxANaf4tKkb4QJMg4m6Xvg4H7U/W8CIN3X9bRMrmep/1haEq1VX8OZQt5 -6gCfWdWqFSmogRFFwkOZitQ1sZvFblQ+lg3Xt1fdq+buXUm5N7jVWH+ps6kqtwL4 -Kcut45dp+Sf/RAuRwDGcYItSBGSvf8g0kI7QMEgSHSFJ4nJlQtYD6Ytau9C1A4qL -M6VzR/GVx4dJv8cqUEUr/+CqcKzvWCaiQWHYKb0o0H7lJXDST4tesU9DhEXL99Ks -J2hl0dSHTRnpg4SLFKp7boE5i8RmSOoxYeesdE/BbJfXpfAYFDSLvrH7QjhZWmgl -PFrb+yNI1k0kk5hwjBgPRJqMN212KQYJ3u2t322Mj3JM73IRtiUL8kEkSwV9bx3+ -0/UeJ6pvLCpUtAsLcgGM8V3tVMHFiGSiP+jlSLOT8qQ5aNWynmR7rfyn5zNKWMaY -r9tk0f2k7eAS7ScJ8XpIwHDkI9nvlSNB15kbrxeaSIp+TycBPO6KvcP5z5mT1luv -JozXV+74GeSZbPIIURwDqQPVWurKLfiSbOF7wbGb6Eu18vLhVnLJocsWWrBVLIXr -V2cjZg//ZAGu9tb2iSW6F5ago5N3pwvMa25q1s4bFMAPW82UpiAqkA1hIkVHlJng -5ryWP8ZEgnIj29OUG434Uojjx0or5FOwkKPimM3YUfjcI8rmVnThZoebI5QIIXxX -MALI6ODEBlwPf6ZyRSD45N5E/6DRLqK8JEe1I4NE94hVLdYmdsukw/6CVG331ZnW -ysYXjkdMoJe9MZXKXDazH1yrTaCgh03Oji//ST/Rx7mOUcHUL2dDPjt+bR6RYssZ -FWZc2WmzM2/z1dhVGTbe72iXQT0mUjVKmWmJL8Vb0yw+bzIX6aScMdc5GE8xqi8A -nn2EwX23ef1L/hF3KDiN0Ky428/SuW47Q0begK3FgcN07lKQvkAhiUrxRyX+Zf0W 
-q/ISkah+wFce/8OIalOpuv9kk9inE+CjRn+IRRrupkkD5w+quR53d5wOJQzm3zRG -8BoX/IKMTnIj8Frzh5hoECd7p7d+JxRmwpg7RfEaRqLrTyu3w6cHYJjFB2Vcdsuh -mP8O5S9CV1F/+qkYbflEs7P7Xz5woy/hw35VMT4nJyR614HU0ZzfXGqxZwljSXVu -bZtCF43z9EA78bizNHHzHF71in4kWvmd/jxxp4rEwY1FH4HcgLbVTh4njjXzQb7v -HyFYHPZlz20Iclj5TcBhp8OvfkibIcRQySULaELQbODFjzFSP9Y= -=Cs3R ------END PGP PUBLIC KEY BLOCK----- diff --git a/playbooks/roles/sensor-common/handlers/main.yml b/playbooks/roles/sensor-common/handlers/main.yml deleted file mode 100644 index 8ceb48be2..000000000 --- a/playbooks/roles/sensor-common/handlers/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: force sync time - command: > - chronyc -a 'burst 3/4'; sleep 5; chronyc -a makestep - -- name: configure monitor interfaces - shell: > - for intf in {{ rock_monifs | join(' ') }}; do - /sbin/ifup ${intf}; - done - -- name: sshd restart - service: name=sshd state=restarted - -... diff --git a/playbooks/roles/sensor-common/tasks/install.yml b/playbooks/roles/sensor-common/tasks/install.yml deleted file mode 100644 index 835ae50a0..000000000 --- a/playbooks/roles/sensor-common/tasks/install.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Install packages - yum: - name: - - python-pyOpenSSL - - firewalld - - chrony - - libselinux-python - state: present -... diff --git a/playbooks/roles/sensor-common/tasks/prechecks.yml b/playbooks/roles/sensor-common/tasks/prechecks.yml deleted file mode 100644 index c81cf5b7c..000000000 --- a/playbooks/roles/sensor-common/tasks/prechecks.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -... 
diff --git a/playbooks/roles/stenographer/meta/main.yml b/playbooks/roles/stenographer/meta/main.yml deleted file mode 100644 index 722379974..000000000 --- a/playbooks/roles/stenographer/meta/main.yml +++ /dev/null @@ -1,57 +0,0 @@ -galaxy_info: - author: your name - description: your description - company: your company (optional) - - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: http://example.com/issue/tracker - - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - - min_ansible_version: 1.2 - - # If this a Container Enabled role, provide the minimum Ansible Container version. - # min_ansible_container_version: - - # Optionally specify the branch Galaxy will use when accessing the GitHub - # repo for this role. During role install, if no tags are available, - # Galaxy will use this branch. During import Galaxy will access files on - # this branch. If Travis integration is configured, only notifications for this - # branch will be accepted. Otherwise, in all cases, the repo's default branch - # (usually master) will be used. - #github_branch: - - # - # platforms is a list of platforms, and each platform has a name and a list of versions. - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - - galaxy_tags: [] - # List tags for your role here, one per line. A tag is a keyword that describes - # and categorizes the role. Users find roles by searching for tags. Be sure to - # remove the '[]' above, if you add tags to this list. - # - # NOTE: A tag is limited to a single word comprised of alphanumeric characters. - # Maximum 20 tags per role. - -dependencies: [] - # List your role dependencies here, one per line. Be sure to remove the '[]' above, - # if you add dependencies to this list. 
\ No newline at end of file diff --git a/playbooks/roles/stenographer/tasks/prechecks.yml b/playbooks/roles/stenographer/tasks/prechecks.yml deleted file mode 100644 index f1f0bfd74..000000000 --- a/playbooks/roles/stenographer/tasks/prechecks.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# Insert any prerequisite checks here -... diff --git a/playbooks/roles/suricata/handlers/main.yml b/playbooks/roles/suricata/handlers/main.yml deleted file mode 100644 index cff2e9c1e..000000000 --- a/playbooks/roles/suricata/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for suricata \ No newline at end of file diff --git a/playbooks/roles/suricata/meta/main.yml b/playbooks/roles/suricata/meta/main.yml deleted file mode 100644 index 722379974..000000000 --- a/playbooks/roles/suricata/meta/main.yml +++ /dev/null @@ -1,57 +0,0 @@ -galaxy_info: - author: your name - description: your description - company: your company (optional) - - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: http://example.com/issue/tracker - - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - - min_ansible_version: 1.2 - - # If this a Container Enabled role, provide the minimum Ansible Container version. - # min_ansible_container_version: - - # Optionally specify the branch Galaxy will use when accessing the GitHub - # repo for this role. During role install, if no tags are available, - # Galaxy will use this branch. During import Galaxy will access files on - # this branch. If Travis integration is configured, only notifications for this - # branch will be accepted. Otherwise, in all cases, the repo's default branch - # (usually master) will be used. - #github_branch: - - # - # platforms is a list of platforms, and each platform has a name and a list of versions. 
- # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - - galaxy_tags: [] - # List tags for your role here, one per line. A tag is a keyword that describes - # and categorizes the role. Users find roles by searching for tags. Be sure to - # remove the '[]' above, if you add tags to this list. - # - # NOTE: A tag is limited to a single word comprised of alphanumeric characters. - # Maximum 20 tags per role. - -dependencies: [] - # List your role dependencies here, one per line. Be sure to remove the '[]' above, - # if you add dependencies to this list. \ No newline at end of file diff --git a/playbooks/roles/suricata/tasks/all.yml b/playbooks/roles/suricata/tasks/all.yml deleted file mode 100644 index b8d028659..000000000 --- a/playbooks/roles/suricata/tasks/all.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- import_tasks: install.yml -- import_tasks: configure.yml -... diff --git a/playbooks/roles/suricata/tasks/main.yml b/playbooks/roles/suricata/tasks/main.yml deleted file mode 100644 index 44b4b2480..000000000 --- a/playbooks/roles/suricata/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# tasks file for suricata -- import_tasks: "{{ method }}.yml" -... 
diff --git a/playbooks/roles/zookeeper/meta/main.yml b/playbooks/roles/zookeeper/meta/main.yml deleted file mode 100644 index 722379974..000000000 --- a/playbooks/roles/zookeeper/meta/main.yml +++ /dev/null @@ -1,57 +0,0 @@ -galaxy_info: - author: your name - description: your description - company: your company (optional) - - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: http://example.com/issue/tracker - - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - - min_ansible_version: 1.2 - - # If this a Container Enabled role, provide the minimum Ansible Container version. - # min_ansible_container_version: - - # Optionally specify the branch Galaxy will use when accessing the GitHub - # repo for this role. During role install, if no tags are available, - # Galaxy will use this branch. During import Galaxy will access files on - # this branch. If Travis integration is configured, only notifications for this - # branch will be accepted. Otherwise, in all cases, the repo's default branch - # (usually master) will be used. - #github_branch: - - # - # platforms is a list of platforms, and each platform has a name and a list of versions. - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - - galaxy_tags: [] - # List tags for your role here, one per line. A tag is a keyword that describes - # and categorizes the role. Users find roles by searching for tags. Be sure to - # remove the '[]' above, if you add tags to this list. - # - # NOTE: A tag is limited to a single word comprised of alphanumeric characters. - # Maximum 20 tags per role. - -dependencies: [] - # List your role dependencies here, one per line. Be sure to remove the '[]' above, - # if you add dependencies to this list. 
\ No newline at end of file diff --git a/playbooks/roles/zookeeper/tasks/CentOS-7.yml b/playbooks/roles/zookeeper/tasks/CentOS-7.yml deleted file mode 100644 index 23310b3c1..000000000 --- a/playbooks/roles/zookeeper/tasks/CentOS-7.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - - name: Install zookeeper packages - yum: - name: "{{ item.pkg }}" - state: "installed" - when: method == "install" - with_items: - - java-1.8.0-headless - - zookeeper -... diff --git a/playbooks/roles/zookeeper/tasks/all.yml b/playbooks/roles/zookeeper/tasks/all.yml deleted file mode 100644 index 5b2b60ec1..000000000 --- a/playbooks/roles/zookeeper/tasks/all.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- import_tasks: install.yml method=install -- import_tasks: configure.yml method=configure -... diff --git a/playbooks/roles/zookeeper/tasks/configure.yml b/playbooks/roles/zookeeper/tasks/configure.yml deleted file mode 100644 index 40bad9891..000000000 --- a/playbooks/roles/zookeeper/tasks/configure.yml +++ /dev/null @@ -1,7 +0,0 @@ -###################################################### -- name: Enable and start zookeeper - systemd: - name: zookeeper - state: "{{ 'started' if enable_zookeeper else 'stopped' }}" - enabled: "{{ enable_zookeeper }}" - when: with_zookeeper diff --git a/playbooks/roles/zookeeper/tasks/install.yml b/playbooks/roles/zookeeper/tasks/install.yml deleted file mode 100644 index 9d21e1616..000000000 --- a/playbooks/roles/zookeeper/tasks/install.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Include OS specific tasks - include_tasks: "{{ item }}" - with_first_found: - - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml" - - "{{ ansible_distribution }}.yml" - - "{{ ansible_os_family }}.yml" - vars: - method: install - -... 
diff --git a/playbooks/roles/zookeeper/tasks/main.yml b/playbooks/roles/zookeeper/tasks/main.yml deleted file mode 100644 index c9a20b576..000000000 --- a/playbooks/roles/zookeeper/tasks/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# tasks file for zookeeper -- import_tasks: "{{ method }}.yml" diff --git a/playbooks/setup-deploy-host.yml b/playbooks/setup-deploy-host.yml new file mode 100644 index 000000000..fca795be0 --- /dev/null +++ b/playbooks/setup-deploy-host.yml @@ -0,0 +1,62 @@ +--- + +- hosts: localhost + gather_facts: false + tasks: + - name: Install RockNSM GPG keys + copy: + src: "{{ item }}" + dest: "/etc/pki/rpm-gpg/{{ item }}" + mode: 0644 + owner: root + group: root + with_items: + - RPM-GPG-KEY-RockNSM-2 + - RPM-GPG-KEY-RockNSM-Testing + - RPM-GPG-KEY-RockNSM-pkgcloud-2_3 + + - name: Trust RockNSM GPG keys + rpm_key: + state: present + key: "{{ item.path }}" + with_items: + - { repoid: "rocknsm_2_3", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2" } + - { repoid: "rocknsm_2_3", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_3" } + register: registered_keys + + - name: Configure RockNSM online repos + yum_repository: + file: rocknsm + name: "{{ item.name }}" + enabled: "{{ rock_online_install }}" + description: "{{ item.name }}" + baseurl: "{{ item.baseurl }}" + repo_gpgcheck: 1 + gpgcheck: "{{ item.gpgcheck }}" + gpgkey: + - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_3 + - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 + sslverify: 1 + sslcacert: /etc/pki/tls/certs/ca-bundle.crt + metadata_expire: 300 + cost: 750 + state: present + with_items: + - { name: "rocknsm_2_3", gpgcheck: yes, baseurl: "{{ rocknsm_baseurl }}" } + - { name: "rocknsm_2_3-source", gpgcheck: no, baseurl: "{{ rocknsm_srpm_baseurl }}" } + + - name: Trust RockNSM GPG keys in yum + command: "yum -q makecache -y --disablerepo='*' --enablerepo='{{ item.repoid }}'" + with_items: + - { repoid: "rocknsm_2_3", test: "{{ rock_online_install }}" } + - { repoid: 
"rocknsm_2_3-source", test: "{{ rock_online_install }}" } + when: item.test | bool + changed_when: False + # TODO: Fix this ^^ + + - name: Install support packages + yum: + name: + - python2-jinja2 + - python2-markupsafe + state: latest diff --git a/playbooks/templates/broctl.service.j2 b/playbooks/templates/broctl.service.j2 deleted file mode 100644 index bde310154..000000000 --- a/playbooks/templates/broctl.service.j2 +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Bro Network Intrusion Detection System (NIDS) -After=network.target - -[Service] -Type=forking -User={{ bro_user }} -Group={{ bro_group }} -Environment=HOME={{ bro_data_dir }}/spool -ExecStart=/opt/bro/bin/broctl deploy -ExecStop=/opt/bro/bin/broctl stop - -[Install] -WantedBy=multi-user.target - diff --git a/playbooks/templates/easy-rsa-vars.j2 b/playbooks/templates/easy-rsa-vars.j2 deleted file mode 100644 index 7a69f0c0c..000000000 --- a/playbooks/templates/easy-rsa-vars.j2 +++ /dev/null @@ -1,80 +0,0 @@ -# easy-rsa parameter settings - -# NOTE: If you installed from an RPM, -# don't edit this file in place in -# /usr/share/openvpn/easy-rsa -- -# instead, you should copy the whole -# easy-rsa directory to another location -# (such as /etc/openvpn) so that your -# edits will not be wiped out by a future -# OpenVPN package upgrade. - -# This variable should point to -# the top level of the easy-rsa -# tree. -export EASY_RSA="`pwd`" - -# -# This variable should point to -# the requested executables -# -export OPENSSL="openssl" -export PKCS11TOOL="pkcs11-tool" -export GREP="grep" - - -# This variable should point to -# the openssl.cnf file included -# with easy-rsa. -export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA` - -# Edit this variable to point to -# your soon-to-be-created key -# directory. -# -# WARNING: clean-all will do -# a rm -rf on this directory -# so make sure you define -# it correctly! 
-export KEY_DIR="$EASY_RSA/keys" - -# Issue rm -rf warning -echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR - -# PKCS11 fixes -export PKCS11_MODULE_PATH="dummy" -export PKCS11_PIN="dummy" - -# Increase this to 2048 if you -# are paranoid. This will slow -# down TLS negotiation performance -# as well as the one-time DH parms -# generation process. -export KEY_SIZE=2048 - -# In how many days should the root CA key expire? -export CA_EXPIRE=3650 - -# In how many days should certificates expire? -export KEY_EXPIRE=3650 - -# These are the default values for fields -# which will be placed in the certificate. -# Don't leave any of these fields blank. -export KEY_COUNTRY="US" -export KEY_PROVINCE="MO" -export KEY_CITY="St Louis" -export KEY_ORG="RockNSM" -export KEY_EMAIL="info@rocknsm.io" -export KEY_OU="NSM Ninjas" - -# X509 Subject Field -export KEY_NAME="EasyRSA" - -# PKCS11 Smart Card -# export PKCS11_MODULE_PATH="/usr/lib/changeme.so" -# export PKCS11_PIN=1234 - -# If you'd like to sign all keys with the same Common Name, uncomment the KEY_CN export below -# You will also need to make sure your OpenVPN server config has the duplicate-cn option set -# export KEY_CN="CommonName" diff --git a/playbooks/templates/elasticsearch.yml.j2 b/playbooks/templates/elasticsearch.yml.j2 deleted file mode 100644 index a48b6867b..000000000 --- a/playbooks/templates/elasticsearch.yml.j2 +++ /dev/null @@ -1,88 +0,0 @@ -# ======================== Elasticsearch Configuration ========================= -# -# NOTE: Elasticsearch comes with reasonable defaults for most settings. -# Before you set out to tweak and tune the configuration, make sure you -# understand what are you trying to accomplish and the consequences. -# -# The primary way of configuring a node is via this file. This template lists -# the most important settings you may want to configure for a production cluster. 
-# -# Please consult the documentation for further information on configuration options: -# https://www.elastic.co/guide/en/elasticsearch/reference/index.html -# -# ---------------------------------- Cluster ----------------------------------- -# -# Use a descriptive name for your cluster: -# -cluster.name: {{ es_cluster_name }} -# -# ------------------------------------ Node ------------------------------------ -# -# Use a descriptive name for the node: -# -node.name: {{ es_node_name }} -# -# Add custom attributes to the node: -# -#node.attr.rack: r1 -# -# ----------------------------------- Paths ------------------------------------ -# -# Path to directory where to store the data (separate multiple locations by comma): -# -path.data: {{ es_data_dir }} -# -# Path to log files: -# -path.logs: {{ es_log_dir }} -# -# ----------------------------------- Memory ----------------------------------- -# -# Lock the memory on startup: -# -bootstrap.memory_lock: true -# -# Make sure that the heap size is set to about half the memory available -# on the system and that the owner of the process is allowed to use this -# limit. -# -# Elasticsearch performs poorly when the system is swapping the memory. -# -# ---------------------------------- Network ----------------------------------- -# -# Set the bind address to a specific IP (IPv4 or IPv6): -# -network.host: _local:ipv4_ -# -# Set a custom port for HTTP: -# -#http.port: 9200 -# -# For more information, consult the network module documentation. 
-# -# --------------------------------- Discovery ---------------------------------- -# -# Pass an initial list of hosts to perform discovery when new node is started: -# The default list of hosts is ["127.0.0.1", "[::1]"] -# -#discovery.zen.ping.unicast.hosts: ["host1", "host2"] -# -# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1): -# -#discovery.zen.minimum_master_nodes: -# -# For more information, consult the zen discovery module documentation. -# -# ---------------------------------- Gateway ----------------------------------- -# -# Block initial recovery after a full cluster restart until N nodes are started: -# -#gateway.recover_after_nodes: 3 -# -# For more information, consult the gateway module documentation. -# -# ---------------------------------- Various ----------------------------------- -# -# Require explicit names when deleting indices: -# -#action.destructive_requires_name: true diff --git a/playbooks/templates/es_cleanup.sh.j2 b/playbooks/templates/es_cleanup.sh.j2 deleted file mode 100644 index 88c621245..000000000 --- a/playbooks/templates/es_cleanup.sh.j2 +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -indexes=(bro suricata) -es_uri="http://localhost:9200" - -#Clean out old marvel indexes, only keeping the current index. -for i in $(curl -sSL ${es_uri}/_stats/indexes\?pretty\=1 | grep marvel | grep -Ev 'es-data|kibana' | grep -vF "$(date +%m.%d)" | awk '{print $1}' | sed 's/\"//g' 2>/dev/null); do - curl -sSL -XDELETE ${es_uri}/$i > /dev/null 2>&1 -done - -#Cleanup TopBeats indexes from 5 days ago. -#curl -sSL -XDELETE "http://127.0.0.1:9200/topbeat-$(date -d '5 days ago' +%Y.%m.%d)" 2>&1 - -for item in ${indexes[*]}; do - #Delete Logstash indexes from 60 days ago. - curl -sSL -XDELETE "${es_uri}/${item}-$(date -d '{{ elastic_delete_interval }} days ago' +%Y.%m.%d)" 2>&1 - #Close Logstash indexes from 15 days ago. 
- curl -sSL -XPOST "${es_uri}/${item}-$(date -d '{{ elastic_close_interval }} days ago' +%Y.%m.%d)/_close" 2>&1 -done - -#Make sure all indexes have replicas off -curl -sSL -XPUT 'localhost:9200/_all/_settings' -d ' -{ - "index" : { - "number_of_replicas" : 0 - } -}' > /dev/null 2>&1 diff --git a/playbooks/templates/filebeat.yml.j2 b/playbooks/templates/filebeat.yml.j2 index 1b39744e9..0124f94ed 100644 --- a/playbooks/templates/filebeat.yml.j2 +++ b/playbooks/templates/filebeat.yml.j2 @@ -1,20 +1,9 @@ #=========================== Filebeat prospectors ============================= -filebeat.prospectors: -- input_type: log - paths: - - {{ rock_data_dir }}/suricata/eve.json - json.keys_under_root: true - fields: - kafka_topic: suricata-raw - fields_under_root: true -- input_type: log - paths: - - {{ rock_data_dir }}/fsf/rockout.log - json.keys_under_root: true - fields: - kafka_topic: fsf-raw - fields_under_root: true +filebeat.config.inputs: + enabled: true + path: configs/*.yml + processors: - decode_json_fields: fields: ["message","Scan Time", "Filename", "objects", "Source", "meta", "Alert" ,"Summary"] @@ -42,7 +31,7 @@ processors: # Multiple outputs may be used. 
output.kafka: - hosts: ["localhost:9092"] + hosts: [{% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %}] topic: '%{[kafka_topic]}' required_acks: 1 diff --git a/playbooks/templates/nginx-rock.conf.j2 b/playbooks/templates/nginx-rock.conf.j2 deleted file mode 100644 index 6b91d0723..000000000 --- a/playbooks/templates/nginx-rock.conf.j2 +++ /dev/null @@ -1,27 +0,0 @@ -server { - listen 443 ssl; - - server_name {{ rock_hostname }}; - server_name _; - - ssl on; - ssl_certificate {{ http_tls_crt }}; - ssl_certificate_key {{ http_tls_key }}; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - #ssl_ciphers HIGH:!aNULL:!MD5; - ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'; - ssl_prefer_server_ciphers on; - ssl_dhparam {{http_tls_dhparams}}; - - auth_basic "Restricted Access"; - auth_basic_user_file /etc/nginx/htpasswd.users; - - location / { - proxy_pass http://localhost:5601; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_cache_bypass $http_upgrade; - } -} diff --git a/playbooks/templates/pulledpork.conf.j2 b/playbooks/templates/pulledpork.conf.j2 deleted file mode 100644 index aa54811a7..000000000 --- a/playbooks/templates/pulledpork.conf.j2 +++ /dev/null @@ -1,230 
+0,0 @@ -# Config file for pulledpork -# Be sure to read through the entire configuration file -# If you specify any of these items on the command line, it WILL take -# precedence over any value that you specify in this file! -{# I'm making the assumption here that a user will only configure - pulledpork if they have snort or suricata installed. This will - currently break otherwise. We could address this usecase fairly - easily if there is a need -#} -{% if with_suricata %} -{% set engine_basepath = "/etc/suricata" %} -{% elif with_snort %} -{% set engine_basepath = "/etc/snort" %} -{% endif %} - -####### -####### The below section defines what your oinkcode is (required for -####### VRT rules), defines a temp path (must be writable) and also -####### defines what version of rules that you are getting (for your -####### snort version and subscription etc...) -####### - -# You can specify one or as many rule_urls as you like, they -# must appear as http://what.site.com/|rulesfile.tar.gz|1234567. You can specify -# each on an individual line, or you can specify them in a , separated list -# i.e. rule_url=http://x.y.z/|a.tar.gz|123,http://z.y.z/|b.tar.gz|456 -# note that the url, rule file, and oinkcode itself are separated by a pipe | -# i.e. url|tarball|123456789, -#rule_url=https://www.snort.org/reg-rules/|snortrules-snapshot.tar.gz| -# NEW Community ruleset: -#rule_url=https://snort.org/downloads/community/|community-rules.tar.gz|Community -# NEW For IP Blacklisting! Note the format is urltofile|IPBLACKLIST| -# This format MUST be followed to let pulledpork know that this is a blacklist -#rule_url=http://talosintelligence.com/feeds/ip-filter.blf|IPBLACKLIST|open -# URL for rule documentation! (slow to process) -#rule_url=https://www.snort.org/reg-rules/|opensource.gz| -# THE FOLLOWING URL is for emergingthreats downloads, note the tarball name change! -# and open-nogpl, to avoid conflicts. 
-#rule_url=https://rules.emergingthreats.net/|emerging.rules.tar.gz|open-nogpl -# THE FOLLOWING URL is for etpro downloads, note the tarball name change! -# and the et oinkcode requirement! -#rule_url=https://rules.emergingthreatspro.com/|etpro.rules.tar.gz| -# NOTE above that the VRT snortrules-snapshot does not contain the version -# portion of the tarball name, this is because PP now automatically populates -# this value for you, if, however you put the version information in, PP will -# NOT populate this value but will use your value! -{% for rule in pulledpork_rules if (rule.test is undefined or rule.test) %} -rule_url={{ rule.url }}|{{ rule.file }}|{{ rule.apikey }} -{% endfor %} -# Specify rule categories to ignore from the tarball in a comma separated list -# with no spaces. There are four ways to do this: -# 1) Specify the category name with no suffix at all to ignore the category -# regardless of what rule-type it is, ie: netbios -# 2) Specify the category name with a '.rules' suffix to ignore only gid 1 -# rulefiles located in the /rules directory of the tarball, ie: policy.rules -# 3) Specify the category name with a '.preproc' suffix to ignore only -# preprocessor rules located in the /preproc_rules directory of the tarball, -# ie: sensitive-data.preproc -# 4) Specify the category name with a '.so' suffix to ignore only shared-object -# rules located in the /so_rules directory of the tarball, ie: netbios.so -# The example below ignores dos rules wherever they may appear, sensitive- -# data preprocessor rules, p2p so-rules (while including gid 1 p2p rules), -# and netbios gid-1 rules (while including netbios so-rules): -# ignore = dos,sensitive-data.preproc,p2p.so,netbios.rules -# These defaults are reasonable for the VRT ruleset with Snort 2.9.0.x. -ignore=deleted.rules,experimental.rules,local.rules -# IMPORTANT, if you are NOT yet using 2.8.6 then you MUST comment out the -# previous ignore line and uncomment the following! 
-# ignore=deleted,experimental,local,decoder,preprocessor,sensitive-data - -# What is our temp path, be sure this path has a bit of space for rule -# extraction and manipulation, no trailing slash -temp_path=/tmp - -####### -####### The below section is for rule processing. This section is -####### required if you are not specifying the configuration using -####### runtime switches. Note that runtime switches do SUPERSEED -####### any values that you have specified here! -####### - -# What path you want the .rules file containing all of the processed -# rules? (this value has changed as of 0.4.0, previously we copied -# all of the rules, now we are creating a single large rules file -# but still keeping a separate file for your so_rules! -rule_path={{ engine_basepath }}/rules/pulledpork.rules -# What path you want the .rules files to be written to, this is UNIQUE -# from the rule_path and cannot be used in conjunction, this is to be used with the -# -k runtime flag, this can be set at runtime using the -K flag or specified -# here. If specified here, the -k option must also be passed at runtime, however -# specifying -K at runtime forces the -k option to also be set -# out_path=/usr/local/etc/snort/rules/ - -# If you are running any rules in your local.rules file, we need to -# know about them to properly build a sid-msg.map that will contain your -# local.rules metadata (msg) information. You can specify other rules -# files that are local to your system here by adding a comma and more paths... -# remember that the FULL path must be specified for EACH value. -# local_rules=/path/to/these.rules,/path/to/those.rules -local_rules={{ engine_basepath }}/rules/local.rules - -# Where should I put the sid-msg.map file? -sid_msg={{ engine_basepath }}/sid-msg.map - -# New for by2 and more advanced msg mapping. Valid options are 1 or 2 -# specify version 2 if you are running barnyard2.2+. Otherwise use 1 -sid_msg_version=1 - -# Where do you want me to put the sid changelog? 
This is a changelog -# that pulledpork maintains of all new sids that are imported -sid_changelog=/var/log/sid_changes.log -# this value is optional - -####### -####### The below section is for so_rule processing only. If you don't -####### need to use them.. then comment this section out! -####### Alternately, if you are not using pulledpork to process -####### so_rules, you can specify -T at runtime to bypass this altogether -####### - -# What path you want the .so files to actually go to *i.e. where is it -# defined in your snort.conf, needs a trailing slash -#sorule_path='' - -# Path to the snort binary, we need this to generate the stub files -snort_path={{ "/sbin/suricata" if with_suricata else "/usr/sbin/snort" }} - -# We need to know where your snort.conf file lives so that we can -# generate the stub files -#config_path={{ engine_basepath }}/{{ "suricata.yaml" if with_suricata else "snort.conf" }} - -##### Deprecated - The stubs are now categorically written to the single rule file! -# sostub_path=/usr/local/etc/snort/rules/so_rules.rules - -# Define your distro, this is for the precompiled shared object libs! -# Valid Distro Types: -# Debian-6-0, Ubuntu-10-4 -# Ubuntu-12-04, Centos-5-4 -# FC-12, FC-14, RHEL-5-5, RHEL-6-0 -# FreeBSD-8-1, FreeBSD-9-0, FreeBSD-10-0 -# OpenBSD-5-2, OpenBSD-5-3 -# OpenSUSE-11-4, OpenSUSE-12-1 -# Slackware-13-1 -#distro='' - -####### This next section is optional, but probably pretty useful to you. -####### Please read thoroughly! - -# If you are using IP Reputation and getting some public lists, you will probably -# want to tell pulledpork where your blacklist file lives, PP automagically will -# de-dupe any duplicate IPs from different sources. -black_list={{ engine_basepath }}/rules/iplists/default.blacklist - -# IP Reputation does NOT require a full snort HUP, it introduces a concept whereby -# the IP list can be reloaded while snort is running through the use of a control -# socket. 
Please be sure that you built snort with the following optins: -# -enable-shared-rep and --enable-control-socket. Be sure to read about how to -# configure these! The following option tells pulledpork where to place the version -# file for use with control socket ip list reloads! -# This should be the same path where your black_list lives! -IPRVersion={{ engine_basepath }}/rules/iplists - -# The following option tells snort where the snort_control tool is located. -snort_control={{ "/usr/bin/snort_control" if with_snort else "" }} - -# What do you want to backup and archive? This is a comma separated list -# of file or directory values. If a directory is specified, PP will recurse -# through said directory and all subdirectories to archive all files. -# The following example backs up all snort config files, rules, pulledpork -# config files, and snort shared object binary rules. -# backup=/usr/local/etc/snort,/usr/local/etc/pulledpork,/usr/local/lib/snort_dynamicrules/ - -# what path and filename should we use for the backup tarball? -# note that an epoch time value and the .tgz extension is automatically added -# to the backup_file name on completeion i.e. the written file is: -# pp_backup.1295886020.tgz -# backup_file=/tmp/pp_backup - -# Where do you want the signature docs to be copied, if this is commented -# out then they will not be copied / extracted. Note that extracting them -# will add considerable runtime to pulledpork. -# docs=/path/to/base/www - -# The following option, state_order, allows you to more finely control the order -# that pulledpork performs the modify operations, specifically the enablesid -# disablesid and dropsid functions. An example use case here would be to -# disable an entire category and later enable only a rule or two out of it. -# the valid values are disable, drop, and enable. -# state_order=disable,drop,enable - - -# Define the path to the pid files of any running process that you want to -# HUP after PP has completed its run. 
-# pid_path=/var/run/snort.pid,/var/run/barnyard.pid,/var/run/barnyard2.pid -# and so on... -# pid_path=/var/run/snort_eth0.pid - -# This defines the version of snort that you are using, for use ONLY if the -# proper snort binary is not on the system that you are fetching the rules with -# This value MUST contain all 4 minor version -# numbers. ET rules are now also dependant on this, verify supported ET versions -# prior to simply throwing rubbish in this variable kthx! -# -# Suricata users - set this to 'suricata-3.x.x' to process rule files -# for suricata, this mimics the -S flag on the command line. -{% if with_suricata %} -snort_version=suricata -{% else %} -snort_version=2.9.0.0 -{% endif %} - - -# Here you can specify what rule modification files to run automatically. -# simply uncomment and specify the apt path. -# enablesid=/usr/local/etc/snort/enablesid.conf -# dropsid=/usr/local/etc/snort/dropsid.conf -disablesid=/etc/pulledpork/disablesid.conf -# modifysid=/usr/local/etc/snort/modifysid.conf - -# What is the base ruleset that you want to use, please uncomment to use -# and see the README.RULESETS for a description of the options. -# Note that setting this value will disable all ET rulesets if you are -# Running such rulesets -# ips_policy=security - -####### Remember, a number of these values are optional.. if you don't -####### need to process so_rules, simply comment out the so_rule section -####### you can also specify -T at runtime to process only GID 1 rules. 
- -version=0.7.2 diff --git a/playbooks/templates/rock_config.yml.j2 b/playbooks/templates/rock_config.yml.j2 index d9c8cce6d..1c6e3e826 100644 --- a/playbooks/templates/rock_config.yml.j2 +++ b/playbooks/templates/rock_config.yml.j2 @@ -28,11 +28,7 @@ rock_monifs: ############################################################################### # Sensor Resource Configuration ############################################################################### -# Set the hostname of the sensor: -rock_hostname: {{ rock_hostname }} - -# Set the Fully Qualified Domain Name: -rock_fqdn: {{ rock_fqdn }} +# Set hostname and fqdn in inventory file # Set the number of CPUs assigned to Bro: bro_cpu: {{ bro_cpu }} @@ -154,7 +150,7 @@ enable_zookeeper: {{ enable_zookeeper }} enable_kafka: {{ enable_kafka }} enable_lighttpd: {{ enable_lighttpd }} enable_fsf: {{ enable_fsf }} - +enable_filebeat: {{ enable_filebeat }} ############################################################################### # NEXT STEP: Deployment diff --git a/rock.spec b/rock.spec index a23a2fe98..ae88f8d52 100644 --- a/rock.spec +++ b/rock.spec @@ -1,8 +1,10 @@ -%global _rockdir /opt/rocknsm/rock +%global _rockdir /usr/share/rock +%global _sysconfdir /etc/rocknsm +%global _sbindir /usr/sbin Name: rock -Version: 2.2.0 -Release: 2 +Version: 2.3.0 +Release: 1 Summary: Network Security Monitoring collections platform @@ -12,7 +14,7 @@ Source0: https://github.com/rocknsm/%{name}/archive/v%{version}.tar.gz#/% BuildArch: noarch -Requires: ansible >= 2.4.2 +Requires: ansible >= 2.7.0 Requires: python-jinja2 >= 2.9.0 Requires: python-markupsafe >= 0.23 Requires: python-pyOpenSSL @@ -33,13 +35,17 @@ rm -rf %{buildroot} DESTDIR=%{buildroot} #make directories -mkdir -p %{buildroot}/%{_rockdir} -mkdir -p %{buildroot}/%{_rockdir}/bin +mkdir -p %{buildroot}/%{_rockdir}/roles mkdir -p %{buildroot}/%{_rockdir}/playbooks +mkdir -p %{buildroot}/%{_sbindir} +mkdir -p %{buildroot}/%{_sysconfdir} # Install ansible files -install -p 
-m 755 bin/deploy_rock.sh %{buildroot}/%{_rockdir}/bin/ -install -p -m 755 bin/generate_defaults.sh %{buildroot}/%{_rockdir}/bin/ +install -p -m 755 bin/deploy_rock.sh %{buildroot}/%{_sbindir}/ +install -p -m 755 bin/generate_defaults.sh %{buildroot}/%{_sbindir}/ +install -m 644 etc/hosts.ini %{buildroot}/%{_sysconfdir}/ +install -m 644 etc/config.yml %{buildroot}/%{_sysconfdir}/ +cp -a roles/. %{buildroot}/%{_rockdir}/roles cp -a playbooks/. %{buildroot}/%{_rockdir}/playbooks # make dir and install tests @@ -47,17 +53,26 @@ mkdir -p %{buildroot}/%{_rockdir}/tests cp -a tests/. %{buildroot}/%{_rockdir}/tests %files +%doc README.md LICENSE CONTRIBUTING.md +%config %{_rockdir}/playbooks/group_vars/all.yml +%config %{_rockdir}/playbooks/ansible.cfg +%config %{_sysconfdir}/hosts.ini +%config %{_sysconfdir}/config.yml %defattr(0644, root, root, 0755) -%{_rockdir}/playbooks/* +%{_rockdir}/roles/* +%{_rockdir}/playbooks/*.yml +%{_rockdir}/playbooks/templates/* %{_rockdir}/tests/* -%doc README.md LICENSE CONTRIBUTING.md -%config %{_rockdir}/playbooks/ansible.cfg -%attr(0755, root, root) %{_rockdir}/bin/deploy_rock.sh -%attr(0755, root, root) %{_rockdir}/bin/generate_defaults.sh +%attr(0755, root, root) %{_sbindir}/deploy_rock.sh +%attr(0755, root, root) %{_sbindir}/generate_defaults.sh %changelog +* Fri Jan 25 2019 Bradford Dabbs 2.3.0-1 +- Update file paths to match new structure +- Bump minimum Ansible version to 2.7 + * Tue Oct 30 2018 Derek Ditch 2.2.0-2 - Fixed issue with missing GPG keys (derek@rocknsm.io) - Update logrotate configuration (derek@rocknsm.io) diff --git a/playbooks/roles/bro/README.md b/roles/bro/README.md similarity index 100% rename from playbooks/roles/bro/README.md rename to roles/bro/README.md diff --git a/playbooks/roles/bro/defaults/main.yml b/roles/bro/defaults/main.yml similarity index 100% rename from playbooks/roles/bro/defaults/main.yml rename to roles/bro/defaults/main.yml diff --git a/playbooks/files/GeoIP.conf 
b/roles/bro/files/GeoIP.conf similarity index 100% rename from playbooks/files/GeoIP.conf rename to roles/bro/files/GeoIP.conf diff --git a/playbooks/files/bro-scripts-readme.txt b/roles/bro/files/bro-scripts-readme.txt similarity index 100% rename from playbooks/files/bro-scripts-readme.txt rename to roles/bro/files/bro-scripts-readme.txt diff --git a/playbooks/files/broctl.sh b/roles/bro/files/broctl.sh similarity index 100% rename from playbooks/files/broctl.sh rename to roles/bro/files/broctl.sh diff --git a/playbooks/files/profile.d-bro.sh b/roles/bro/files/profile.d-bro.sh similarity index 100% rename from playbooks/files/profile.d-bro.sh rename to roles/bro/files/profile.d-bro.sh diff --git a/roles/bro/handlers/main.yml b/roles/bro/handlers/main.yml new file mode 100644 index 000000000..327907f54 --- /dev/null +++ b/roles/bro/handlers/main.yml @@ -0,0 +1,13 @@ +--- +# handlers file for Bro + +- name: Reload bro + service: + name: bro + state: "{{ 'started' if enable_bro else 'stopped' }}" + +- name: Configure monitor interfaces + shell: > + for intf in {{ rock_monifs | join(' ') }}; do + /sbin/ifup ${intf}; + done diff --git a/roles/bro/tasks/main.yml b/roles/bro/tasks/main.yml new file mode 100644 index 000000000..0a7c4be06 --- /dev/null +++ b/roles/bro/tasks/main.yml @@ -0,0 +1,269 @@ +--- +# tasks file for bro + +- name: Install packages + yum: + name: + - bro + - bro-plugin-af_packet + - bro-plugin-kafka + - GeoIP + - GeoIP-update + - postfix + state: installed + +- name: Set monitor interface config + template: + src: templates/ifcfg-monif.j2 + dest: /etc/sysconfig/network-scripts/ifcfg-{{ item }} + mode: 0644 + owner: root + group: root + force: yes + with_items: "{{ rock_monifs }}" + +- name: Configure local ifup script + template: + src: templates/ifup-local.j2 + dest: /sbin/ifup-local + mode: 0755 + owner: root + group: root + force: yes + notify: Configure monitor interfaces + +- name: Configure GeoIP Update + copy: + src: GeoIP.conf + dest: 
/etc/GeoIP.conf + + # There's an issue w/ geoipupdate when env is empty +- name: Update GeoIP + shell: > + if [ "x$HTTP_PROXY" == "x" ]; then + unset HTTP_PROXY; + fi + if [ "x$http_proxy" == "x" ]; then + unset http_proxy; + fi + if [ "x$HTTPS_PROXY" == "x" ]; then + unset HTTPS_PROXY; + fi + if [ "x$https_proxy" == "x" ]; then + unset https_proxy; + fi + /usr/bin/geoipupdate + args: + creates: /usr/share/GeoIP/GeoLiteASNum.dat + register: result + failed_when: (result.rc != 0) and (result.rc != 1) + +- name: Create GeoIP symlinks + file: + src: "/usr/share/GeoIP/{{ item.src }}" + dest: "/usr/share/GeoIP/{{ item.dest }}" + force: yes + state: link + with_items: + - { src: 'GeoLiteCity.dat', dest: 'GeoIPCity.dat' } + - { src: 'GeoLiteCountry.dat', dest: 'GeoIPCountry.dat' } + - { src: 'GeoLiteASNum.dat', dest: 'GeoIPASNum.dat' } + - { src: 'GeoLiteCityv6.dat', dest: 'GeoIPCityv6.dat' } + +- name: Create bro group + group: + name: "{{ bro_group }}" + state: present + system: yes + +- name: Create bro user + user: + name: "{{ bro_user }}" + comment: "bro service account" + createhome: no + group: "{{ bro_group }}" + home: /var/spool/bro + shell: /sbin/nologin + system: yes + state: present + +- name: Create bro directories + file: + path: "{{ item }}" + mode: 0755 + owner: "{{ bro_user }}" + group: "{{ bro_group }}" + state: directory + setype: var_log_t + with_items: + - "{{ bro_data_dir }}" + - "{{ bro_data_dir }}/logs" + - "{{ bro_data_dir }}/spool" + +- name: Create /opt/bro for wandering users + file: + dest: "/opt/bro" + state: directory + +- name: Create note to wandering users + copy: + dest: "/opt/bro/README.md" + content: | + Hey! Where's my Bro? + ========================= + RockNSM has aligned the Bro package to be inline with Fedora packaging + guidelines in an effort to push the package upstream for maintenance. + Fedora and EPEL have a great community and we believe others can benefit + from our hard work. 
+ Here's where you can find your stuff: + Bro configuration files + ----------------------- + /opt/bro/etc -> /etc/bro + Bro site scripts + ----------------------- + /opt/bro/share/bro/site -> /usr/share/bro/site + Bro logs and spool dirs (same as previous ROCK iterations) + ----------------------- + /opt/bro/logs -> /data/bro/logs + /opt/bro/spool -> /data/bro/spool + +- name: Create bro configs + template: + src: "{{ item }}.j2" + dest: "{{ bro_sysconfig_dir }}/{{ item }}" + mode: 0644 + owner: root + group: root + notify: Reload bro + loop: + - node.cfg + - broctl.cfg + - networks.cfg + +- name: Add bro custom scripts directory + file: + path: "{{ bro_site_dir }}/scripts" + owner: root + group: root + mode: 0755 + state: directory + +- name: Set permissions on broctl scripts + file: + path: "{{ bro_prefix }}/share/broctl/scripts" + owner: "{{ bro_user }}" + group: "{{ bro_user }}" + mode: 0755 + state: directory + +- name: Add README to scripts + copy: + src: bro-scripts-readme.txt + dest: "{{ bro_site_dir }}/scripts/README.txt" + mode: 0644 + owner: root + group: root + +- name: Checkout ROCK bro scripts + git: + repo: "{{ bro_rockscripts_repo }}" + dest: "{{ bro_site_dir }}/scripts/rock" + version: "{{ bro_rockscripts_branch }}" + when: rock_online_install + +- name: Deploy offline ROCK bro scripts + unarchive: + src: "{{ rock_cache_dir }}/{{ bro_rockscripts_filename }}" + dest: "{{ bro_site_dir }}/scripts/" + owner: root + group: root + creates: "{{ bro_site_dir }}/scripts/rock-scripts-{{ bro_rockscripts_branch | replace ('/', '-') }}" + remote_src: yes + when: not rock_online_install + +- name: Symlink offline ROCK bro scripts + file: + src: "{{ bro_site_dir }}/scripts/rock-scripts-{{ bro_rockscripts_branch | replace ('/', '-') }}" + dest: "{{ bro_site_dir }}/scripts/rock" + state: link + force: yes + when: not rock_online_install + +- name: Update owner for ROCK bro scripts + file: + path: "{{ bro_site_dir }}/scripts/rock" + owner: "{{ bro_user }}" + group: 
"{{ bro_group }}" + state: directory + recurse: yes + follow: yes + tags: + - bro_scripts + +- name: Add ROCK scripts to local.bro + lineinfile: + dest: "{{ bro_site_dir }}/local.bro" + line: "@load scripts/rock # ROCK NSM customizations" + state: present + +- name: Enable bro kafka output to local.bro + lineinfile: + dest: "{{ bro_site_dir }}/local.bro" + line: "@load scripts/rock/plugins/kafka" + state: present + when: with_kafka + +- name: Add bro aliases + copy: + src: profile.d-bro.sh + dest: /etc/profile.d/bro.sh + mode: 0644 + owner: root + group: root + +- name: Add broctl wrapper for admin use + copy: + src: broctl.sh + dest: /usr/sbin/broctl + mode: 0754 + owner: root + group: root + +- name: Set bro capabilities + capabilities: + path: /usr/bin/bro + capability: "{{ item }}" + state: present + with_items: + - "cap_net_raw+eip" + - "cap_net_admin+eip" + +- name: Set capstats capabilities + capabilities: + path: /usr/bin/capstats + capability: "{{ item }}" + state: present + with_items: + - "cap_net_raw+eip" + - "cap_net_admin+eip" + +- name: Set broctl cron + cron: + name: "broctl maintenance" + minute: "*/5" + cron_file: rocknsm_broctl + user: "{{ bro_user }}" + job: "/usr/bin/broctl cron >/dev/null 2>&1" + +- name: Initialize bro scripts for workers + command: /usr/bin/broctl install + args: + creates: "{{ bro_data_dir }}/spool/broctl-config.sh" + become: yes + become_user: "{{ bro_user }}" + +- name: Enable and start broctl + service: + name: bro + enabled: "{{ enable_bro }}" + notify: Reload bro diff --git a/playbooks/templates/bro-broctl.cfg.j2 b/roles/bro/templates/broctl.cfg.j2 similarity index 100% rename from playbooks/templates/bro-broctl.cfg.j2 rename to roles/bro/templates/broctl.cfg.j2 diff --git a/playbooks/files/bro-networks.cfg b/roles/bro/templates/networks.cfg.j2 similarity index 100% rename from playbooks/files/bro-networks.cfg rename to roles/bro/templates/networks.cfg.j2 diff --git a/playbooks/templates/bro-node.cfg.j2 
b/roles/bro/templates/node.cfg.j2 similarity index 100% rename from playbooks/templates/bro-node.cfg.j2 rename to roles/bro/templates/node.cfg.j2 diff --git a/playbooks/roles/bro/tests/inventory b/roles/bro/tests/inventory similarity index 100% rename from playbooks/roles/bro/tests/inventory rename to roles/bro/tests/inventory diff --git a/playbooks/roles/bro/tests/test.yml b/roles/bro/tests/test.yml similarity index 100% rename from playbooks/roles/bro/tests/test.yml rename to roles/bro/tests/test.yml diff --git a/playbooks/roles/bro/vars/main.yml b/roles/bro/vars/main.yml similarity index 100% rename from playbooks/roles/bro/vars/main.yml rename to roles/bro/vars/main.yml diff --git a/playbooks/roles/sensor-common/defaults/main.yml b/roles/common/defaults/main.yml similarity index 100% rename from playbooks/roles/sensor-common/defaults/main.yml rename to roles/common/defaults/main.yml diff --git a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-2 b/roles/common/files/RPM-GPG-KEY-RockNSM-2 similarity index 100% rename from playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-2 rename to roles/common/files/RPM-GPG-KEY-RockNSM-2 diff --git a/playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-Testing b/roles/common/files/RPM-GPG-KEY-RockNSM-Testing similarity index 100% rename from playbooks/roles/sensor-common/files/RPM-GPG-KEY-RockNSM-Testing rename to roles/common/files/RPM-GPG-KEY-RockNSM-Testing diff --git a/roles/common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_3 b/roles/common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_3 new file mode 100644 index 000000000..4b2d0cdbf --- /dev/null +++ b/roles/common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_3 @@ -0,0 +1,65 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFxnAk4BEADjdISX9QYmWogLmOnMdOgVIViurQc4y3VvQxVW2Klv+tvBwdkF +60wWt7GiH+Ubn9vEokg1FydOu3KLIO0SFaHVSCiVDZcjS9AObFCk906mj3wQZYfk +TE7P7T8gViylb47mkEFi/GYbR6eI9BIEzNOKeWOfunGXII/RD7q98bhbc2RduBro +XT1DkO/ahKqAuehvSIKgxG7o/g0veU124OsXJj/aLmVudn7+i41kG4cf/wMulSS+ 
+byMYxahnxmaFITGWqegncQHjHZmUMVGY3NrUtH38LAD92CRY/uhuGti5zbcx5N5u +fizgFDppreK/LZAvFyI2rF8ZdlIOjYp9lbXcuCuOkkxZ3vDYkthToSf/CKux6FSb +1PrbFF0urc4XNWiMqcCq5RvJEBmh1qUzItGuL3upu/ZHf5jtvpTaRg/8M6dGlOYp +si9+f2C4o3Rh3kqTePLp75Bi6AXwDf2FF1WTNQ6XlcGoVf8ZNna9pVJ5AJP0r2oo +3sHc8KdqjLZeMzSqUku/D9qzcnfJD5I+8qHxTB181WTCXkL+9BG6dQ6BnBmXop4w +ON+LuOJLHvxpF4kwfjypU7DjqJNia1IJIicIldXRoL0kb9NdWAnpYDgVdQHCC34N +2pr1wkvVdMliqnYdDzJAV6qSRyumtrtF0Klp7RKxw6wRV6clJwwm11xZVwARAQAB +tGhodHRwczovL3BhY2thZ2VjbG91ZC5pby9yb2NrbnNtLzJfMyAoaHR0cHM6Ly9w +YWNrYWdlY2xvdWQuaW8vZG9jcyNncGdfc2lnbmluZykgPHN1cHBvcnRAcGFja2Fn +ZWNsb3VkLmlvPokCTgQTAQoAOBYhBAE8G3S4gPi5KICQ9INtbMA3lwi5BQJcZwJO +AhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEINtbMA3lwi5VlYP/jA47mMx +f5PKYlBRmcMGBzhEkGvpbRNM4dzoauOKtRh6l2xbi4dkLwf7InNFEXMha3kdXjOP +vBD9rUPt8pWCznCeuXbHblmczgL0Eb0FuPZzcWsKkrqpNP0p9GUs9enpUGKAlYav +wUy2Cx7C0DZ69bnEMVtJGqYvVwUYLP2ruvKF31K64h4NxcWatUQdLo9Bkd7jjHFc +Ku5ems36cakAsBpKwR6cByl08vR2cmHX36lMQCRWbq0KOZp1Z9Dn0JDJ3bkzqCPl +gHlYUj/vvksJaOFgldACjw72R0f0eHTvinnLZ4f8MAmURE7AnfzHZashDMaMEZXh +ZmDWIZxMGqdnTizoLuRcdrWn68C8Gqnc6lwmpNt7EqDubgpcw5Ob8vz8iS3Njg3/ +D27+bMIf42s2m6nTt7mrfM0kbxmghen6uQMGXtRc2gzw7YUW2ZRWM0OdUUww7GrL +WlcA3n0I1jAg5cIXaHHIEcRsTZKdO1kQgOzh7jKIrvWovMXLoEhs6su9sr7NcdK9 +7ynblcKNlBP1q523QqPxVpYD3CPCoUN8bGiFU8rHbFUXeLb503FNHRkkv4GPh6V6 +Rq9qtN6yhuMEvKLQpscvZ/3HiZHp1An6A9KnnPW48hFhShzRhsQuB8/HT+EvtkyO +mVZGkByIoGpUsFDQwhlPdY+rZgAg1JBjVJ6nuQINBFxnAk4BEACkyFX5QAhFzjmd +WloMtXEe6TyrfhFXkA9YgFypZryWikfyGHDudXYfGlaRS82CJfeLCvQ/qglUOvPT +jMTE+mtCDcF6+nefm5sYg6AV7oRX2viRnsHV4NYijPVOVjjeJLcJ64uHzr3vM+GB +kUIEkxWiN9gLJlwRuCDmFG7KMi6TXUlEegxNA0SVWv3RUfwWow2HatGC6iF1KE/3 +SmMQyzZniekoRpWTta7QcxcszpDnRESpkqv75E+x0hj6XilMmPNGvEYk+tI6etIF +RpNuwjYSS7oYEhmGb3yy5ehfMtqIkBjByXnkQ3lNvwcKPUZJ5NriGfp6AL8xxccf +MdjCSgM4jHwtEC29VoTeAOvgFMmfl9NzcDiq9BMxSX4odNv0V8zCrQiwOYfynFY+ +61YU6ctQn75CVmqpXIAd4lvEYmPnxbTklHkzt9//JHwJ5xqLNJIEnc6h7GMlNojF +0Rtm4kQkdYBc2EBR2YF6pykfEGC+EjqSnMbHAuuvIQxUZZP9Fhv9Rw/bg0+NiLzj 
+gzarz5P9L3EzF1EVmEqDsmOoGi4JY1iD6zDkVbE/OEOyOyqBcewv4RPThOKaWjAn +W9VCNBQox+gsTKxsoTKXdDVdOV1o990K+GJuH8wDPhuGOE7vRR3j/FxRUfbNEuk5 +sOVl6a7XG3k7QZGvzbOpdi3qrpBJkwARAQABiQRsBBgBCgAgFiEEATwbdLiA+Lko +gJD0g21swDeXCLkFAlxnAk4CGy4CQAkQg21swDeXCLnBdCAEGQEKAB0WIQR7StvZ +DsNenLrqjWsfheIh5CQl8wUCXGcCTgAKCRAfheIh5CQl81sGEACTAijJr+YCbrXJ +vm8bCHxw7v1cqPPsA8VFAxYPJYVS0nn8pc0sCbxK4rl1lz9Q72Impu+U1QXN1ANB +hwXpWo9nDfk2pmKcburiYRFCMQcxP2n3v1zOHwJ3TWmucQ1Js1V6MIt1M+KtSru0 +QYBNcOAXAkCF/FUAwGKFHHHrj6jwH2J7ZCYY6BSi5NKWLBi973Jbm80cP1caz6bN +XNVO8CiW2Dgd0WtJ2YqoW6ZQbg+fUSR8ZvkLh2at8ozae8CiVefZBcqWACk9V2Gq +juEhtHxFPZQ075Iz4Og1ypc3Gi0AjpYQvFKydNRcavRxF/L+EKgY+n5EzLInWjSL +hkwjTzkUnPBTiaG6WQMznS3dBh+gaUEerJVLFQhQIOutXF9Jk3qX3P2Ygq6947/t +M4i/6d/HsFTkjB3sEnDaY/PMUL59xwYzuoP9kZ/krO8I9WZesFVs3LaqBKLn6GUK +a/EBrYhsvdrSND6bJNklAp3nOf5VaLNTv0fXrO7z6BVTlWpJEtiIMCsUr2DM5JsU +3KWs8TFkhwqIiZDMZASRrxDf19oubpFWuuONydHwOtZGWEFvtK98y5NBhL+Qg73Z +VhZ/zEao7Y0mYqtMfDVTVUmZrp399xiYT2QcI+AMobbKhHTvIY6DZj1S/Z5kafWv +sp1VaN3mdGI1p0NQLdWKxkpOPa39EvQdEACtFO4OT4WywhnVMZyyKN+giVfQyyQh +Ej6F1FlX51b/h1uv0zcwRbvbMsJgHVAlDzjTAAKs1Z4COu4jw9c4paUKA3dZIDE4 +VWVo32LyD4XrIy2BFml1ZrE2wFrXrvnFf2GVIRxS4+lmTpPnPjxHYuSw1iRQZQKL +RaN1ZHtr5VKpgQ8GW//twBCg4YD4QADjXNxgD0sJuyDGXEEg5mnSqJx/DFbwhNBP +PiiKYLlDpemc4RVLhCYD+P6BvMgh1obcXqq1jMtE9u7XpX3OsTdTcx0LIAjRmw0d +pX4dqy+wsfuokymI98jeoX2qCV0HHko7W8xnL8Cgd+jqs+A+eIXA31+0t17TzR4i +tn7nE5UBZd42J1fZwO2V2qSU/ey2Z9vxEa+PW+x7j3D3jhb6SWE4vd1iwlhL29uV +vWSz1zX0hNXtamp3tgolQZteWL/iUBeVea8EIasy5YdOXh26z2lMB4b9PxvYYZoU +F2mbi/PN+B2PxJTM7c8Ftw9zazm0hW30IpdNqgh1jIZXa8hy+rP2qhoRxLCK9re3 +jgGcG7hHwbwN9Ybh75znLweSWvFtlEinIkAtSyOwHA25gAnKSOlRRYKZSeR2MGXO +hguWdd/pwEVmdl1guOqeWqbAuO2bIfYWaQM5ZA2Fap0owSg4BMjvwAYaeA7R0Qmk +WFfwwUndp3eE7w== +=R8Je +-----END PGP PUBLIC KEY BLOCK----- diff --git a/playbooks/files/etc-issue.in b/roles/common/files/etc-issue.in similarity index 100% rename from playbooks/files/etc-issue.in rename to roles/common/files/etc-issue.in diff --git 
a/playbooks/files/nm-issue-update b/roles/common/files/nm-issue-update similarity index 100% rename from playbooks/files/nm-issue-update rename to roles/common/files/nm-issue-update diff --git a/playbooks/files/rockctl b/roles/common/files/rockctl similarity index 100% rename from playbooks/files/rockctl rename to roles/common/files/rockctl diff --git a/roles/common/handlers/main.yml b/roles/common/handlers/main.yml new file mode 100644 index 000000000..29b4ae27b --- /dev/null +++ b/roles/common/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart sshd + service: + name: sshd + state: restarted +... diff --git a/playbooks/roles/sensor-common/tasks/configure-pipelining.yml b/roles/common/tasks/configure-pipelining.yml similarity index 100% rename from playbooks/roles/sensor-common/tasks/configure-pipelining.yml rename to roles/common/tasks/configure-pipelining.yml diff --git a/playbooks/roles/sensor-common/tasks/configure-time.yml b/roles/common/tasks/configure-time.yml similarity index 97% rename from playbooks/roles/sensor-common/tasks/configure-time.yml rename to roles/common/tasks/configure-time.yml index e420fc46c..93f2a9b5f 100644 --- a/playbooks/roles/sensor-common/tasks/configure-time.yml +++ b/roles/common/tasks/configure-time.yml @@ -1,6 +1,6 @@ --- # timedatectl.yml - configure ntp -- name: Install Chrony +- name: Install chrony yum: name: chrony state: installed diff --git a/playbooks/roles/sensor-common/tasks/configure.yml b/roles/common/tasks/configure.yml similarity index 61% rename from playbooks/roles/sensor-common/tasks/configure.yml rename to roles/common/tasks/configure.yml index 2ff7cdb5e..9fb83fb36 100644 --- a/playbooks/roles/sensor-common/tasks/configure.yml +++ b/roles/common/tasks/configure.yml @@ -8,7 +8,7 @@ # You will want to remount this to your "good" storage after the build. # This is just to make sure all the paths in the configs are proper. 
############### -- name: Create ROCK data dir +- name: Create RockNSM data directory file: path: "{{ rock_data_dir }}" mode: 0755 @@ -16,40 +16,39 @@ group: "{{ rock_data_group }}" state: directory -- name: Create ROCK NSM directory +- name: Create RockSNM conf directory file: - path: "{{ rocknsm_dir }}" + path: "{{ rock_conf_dir }}" mode: 0755 owner: root group: root state: directory -###################################################### -######### Configure the monitoring interface ######### -###################################################### -- name: Set monitor interface config - template: - src: templates/ifcfg-monif.j2 - dest: /etc/sysconfig/network-scripts/ifcfg-{{ item }} - mode: 0644 +- name: Create RockNSM directory + file: + path: "{{ rocknsm_dir }}" + mode: 0755 owner: root group: root - force: yes - with_items: "{{ rock_monifs }}" + state: directory -- name: Configure local ifup script - template: - src: templates/ifup-local.j2 - dest: /sbin/ifup-local - mode: 0755 +- name: Download RockNSM elastic configs + get_url: + url: "{{ rock_dashboards_url }}" + dest: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" + mode: 0644 + when: (with_kibana or with_elasticsearch or with_logstash) and rock_online_install + +- name: Extract RockNSM elastic configs + unarchive: + src: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" + dest: /opt/rocknsm owner: root group: root - force: yes - notify: configure monitor interfaces + creates: "{{ rock_module_dir }}" + remote_src: yes + when: (with_kibana or with_elasticsearch or with_logstash) -####################################################### -#################### Disable IPv6 ##################### -####################################################### - name: Disable IPv6 for all interfaces sysctl: name: net.ipv6.conf.all.disable_ipv6 @@ -62,38 +61,32 @@ value: 1 sysctl_file: "{{ rock_sysctl_file }}" -- name: Disable IPv6 in SSHD +- name: Disable IPv6 in sshd lineinfile: dest: /etc/ssh/sshd_config 
regexp: AddressFamily line: AddressFamily inet notify: - - sshd restart - -- name: Remove localhost6 from hosts file - lineinfile: - dest: /etc/hosts - regexp: localhost6 - state: absent + - Restart sshd -####################################################### -#################### DNS Changes ###################### -####################################################### -- name: Set hostname in hosts file +- name: Add the inventory into /etc/hosts lineinfile: dest: /etc/hosts - insertafter: 127.0.0.1 - line: 127.0.0.2 {{ rock_fqdn }} {{ rock_hostname }} + regexp: '.*{{ item }}$' + line: "{{ hostvars[item]['ansible_default_ipv4']['address'] }} {{item}}" + state: present + when: hostvars[item]['ansible_facts']['default_ipv4'] is defined + with_items: + - "{{ groups['all'] }}" - name: Set system hostname hostname: - name: "{{ rock_fqdn }}" + name: "{{ inventory_hostname }}" -####################################################### -################## Setup Yum Repos #################### -####################################################### +- name: Re-run Setup to populate changes + setup: -- name: Setup EPEL repo +- name: Setup EPEL repository yum_repository: name: epel description: EPEL YUM repo @@ -108,7 +101,7 @@ key: http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7 when: rock_online_install -- name: Setup Elastic repo +- name: Setup elastic repository yum_repository: name: elastic-6.x description: Elastic Stack repository for 6.x @@ -127,15 +120,15 @@ with_items: - RPM-GPG-KEY-RockNSM-2 - RPM-GPG-KEY-RockNSM-Testing - - RPM-GPG-KEY-RockNSM-pkgcloud-2_2 + - RPM-GPG-KEY-RockNSM-pkgcloud-2_3 - name: Trust RockNSM GPG keys rpm_key: state: present key: "{{ item.path }}" with_items: - - { repoid: "rocknsm_2_2", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2" } - - { repoid: "rocknsm_2_2", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_2" } + - { repoid: "rocknsm_2_3", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2" } + - { repoid: "rocknsm_2_3", path: 
"/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_3" } - { repoid: "rocknsm-testing", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-Testing"} - { repoid: "rocknsm-local", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2" } register: registered_keys @@ -150,7 +143,7 @@ repo_gpgcheck: 1 gpgcheck: "{{ item.gpgcheck }}" gpgkey: - - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_2 + - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_3 - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 sslverify: 1 sslcacert: /etc/pki/tls/certs/ca-bundle.crt @@ -158,8 +151,8 @@ cost: 750 state: present with_items: - - { name: "rocknsm_2_2", gpgcheck: yes, baseurl: "{{ rocknsm_baseurl }}" } - - { name: "rocknsm_2_2-source", gpgcheck: no, baseurl: "{{ rocknsm_srpm_baseurl }}" } + - { name: "rocknsm_2_3", gpgcheck: yes, baseurl: "{{ rocknsm_baseurl }}" } + - { name: "rocknsm_2_3-source", gpgcheck: no, baseurl: "{{ rocknsm_srpm_baseurl }}" } - name: Configure RockNSM online testing repos yum_repository: @@ -185,13 +178,13 @@ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 repo_gpgcheck: "{{ rock_offline_gpgcheck }}" cost: 500 - when: "{{ not rock_disable_offline_repo | bool }}" + when: not rock_disable_offline_repo - name: Trust RockNSM GPG keys in yum command: "yum -q makecache -y --disablerepo='*' --enablerepo='{{ item.repoid }}'" with_items: - - { repoid: "rocknsm_2_2", test: "{{ rock_online_install }}" } - - { repoid: "rocknsm_2_2-source", test: "{{ rock_online_install }}" } + - { repoid: "rocknsm_2_3", test: "{{ rock_online_install }}" } + - { repoid: "rocknsm_2_3-source", test: "{{ rock_online_install }}" } - { repoid: "rocknsm-testing", test: "{{ rock_online_install }}" } - { repoid: "rocknsm-local", test: "{{ not rock_online_install }}" } when: item.test | bool @@ -212,4 +205,71 @@ - { name: updates, mirror: "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra" } - { name: extras, mirror: 
"http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra"} +- name: Install core packages + yum: + name: "{{ rocknsm_package_list }}" + state: installed + +- name: Enable and start firewalld + service: + name: firewalld + enabled: yes + state: started + +- name: Configure firewall ports + firewalld: + port: "{{ item[1].port }}" + source: "{{ item[0] }}" + permanent: yes + state: enabled + immediate: yes + with_nested: + - "{{ rock_mgmt_nets }}" + - + - { port: "22/tcp" } + +- name: Ensure cache directory exists + file: + dest: "{{ rock_cache_dir }}" + state: directory + mode: 0755 + +- name: Install RockNSM control script + copy: + src: rockctl + dest: /usr/local/bin/rockctl + mode: 0755 + owner: root + group: root + +- name: Create RockNSM control script symlink + file: + src: "/usr/local/bin/rockctl" + dest: "/usr/sbin/rockctl" + force: yes + state: link + +- name: Set RockNSM Version + copy: + content: "{{ rock_version }}" + dest: /etc/rocknsm/rock-version + mode: 0644 + owner: root + group: root + +- name: Install RockNSM /etc/issue + copy: + src: etc-issue.in + dest: /etc/issue.in + mode: 0644 + owner: root + group: root + +- name: Add NetworkManager RockNSM hook + copy: + src: nm-issue-update + dest: /etc/NetworkManager/dispatcher.d/50-rocknsm-issue-update + mode: 0755 + owner: root + group: root ... diff --git a/playbooks/roles/sensor-common/tasks/deploy.yml b/roles/common/tasks/deploy.yml similarity index 71% rename from playbooks/roles/sensor-common/tasks/deploy.yml rename to roles/common/tasks/deploy.yml index a5d21237c..b2f769b9c 100644 --- a/playbooks/roles/sensor-common/tasks/deploy.yml +++ b/roles/common/tasks/deploy.yml @@ -1,9 +1,6 @@ --- # deploy.yml - Common tasks for ROCK -- import_tasks: prechecks.yml - import_tasks: configure.yml - import_tasks: configure-time.yml - import_tasks: configure-pipelining.yml -- import_tasks: install.yml - ... 
diff --git a/playbooks/roles/sensor-common/tasks/main.yml b/roles/common/tasks/main.yml similarity index 100% rename from playbooks/roles/sensor-common/tasks/main.yml rename to roles/common/tasks/main.yml diff --git a/playbooks/roles/docket/README.md b/roles/docket/README.md similarity index 100% rename from playbooks/roles/docket/README.md rename to roles/docket/README.md diff --git a/playbooks/roles/docket/Vagrantfile b/roles/docket/Vagrantfile similarity index 100% rename from playbooks/roles/docket/Vagrantfile rename to roles/docket/Vagrantfile diff --git a/playbooks/roles/docket/defaults/main.yml b/roles/docket/defaults/main.yml similarity index 77% rename from playbooks/roles/docket/defaults/main.yml rename to roles/docket/defaults/main.yml index bd483de5f..355c9aaee 100644 --- a/playbooks/roles/docket/defaults/main.yml +++ b/roles/docket/defaults/main.yml @@ -35,6 +35,7 @@ docket_group: docket # An empty string defaults to all interfaces on IPv4 docket_listen_ip: "0.0.0.0" docket_listen_port: "{{ 8443 if docket_tls else 8080 }}" +docket_host: "{{ hostvars[groups['docket'][0]]['ansible_hostname'] }}" docket_web_server: lighttpd docket_web_pemfile: "/etc/pki/tls/private/lighttpd_docket.pem" @@ -46,17 +47,17 @@ docket_url_resultspath: /results docket_url_pattern: "(^/results/|^/app/docket/)" # Vars to generate keys/certs -docket_x509_dir: /etc/pki/docket/ -docket_x509_key: "{{docket_x509_dir}}/docket_{{ ansible_default_ipv4.address }}_key.pem" -docket_x509_cn: "{{ ansible_hostname }}_docket" +docket_x509_dir: /etc/pki/docket +docket_x509_key: "{{ docket_x509_dir }}/docket_{{ hostvars[groups['docket'][0]]['ansible_default_ipv4']['address'] }}_key.pem" +docket_x509_cn: "{{ docket_host }}_docket" docket_x509_o: Stenographer docket_x509_c: XX docket_x509_user: root docket_x509_group: docket # These should be overridden by host-specific vars -steno_host: "127.0.0.1" -steno_sensor: "{{ansible_hostname}}" +steno_host: "{{ 
hostvars[groups['stenographer'][0]]['ansible_default_ipv4']['address'] }}" +steno_sensor: "{{ hostvars[groups['stenographer'][0]]['ansible_hostname'] }}" steno_port: 1234 steno_certs_dir: /etc/stenographer/certs steno_ca_cert: "{{steno_certs_dir}}/ca_cert.pem" @@ -65,4 +66,4 @@ steno_ca_key: "{{steno_certs_dir}}/ca_key.pem" # This is used to generate the config for docket on # where to connect and how to authenticate docket_steno_instances: -- { host: "{{ steno_host}}", sensor: "{{ steno_sensor }}", port: "{{ steno_port }}", key: "{{ docket_x509_key }}", cert: "{{docket_x509_dir}}/docket-{{inventory_hostname}}_sensor-{{inventory_hostname}}_cert.pem", ca: "{{docket_x509_dir}}/{{inventory_hostname}}_ca_cert.pem" } +- { host: "{{ steno_host}}", sensor: "{{ steno_sensor }}", port: "{{ steno_port }}", key: "{{ docket_x509_key }}", cert: "{{docket_x509_dir}}/docket-{{ docket_host }}_sensor-{{ steno_sensor }}_cert.pem", ca: "{{ docket_x509_dir }}/{{ steno_sensor }}_ca_cert.pem" } diff --git a/playbooks/roles/docket/handlers/main.yml b/roles/docket/handlers/main.yml similarity index 76% rename from playbooks/roles/docket/handlers/main.yml rename to roles/docket/handlers/main.yml index 725b9fa3d..6d63e03d6 100644 --- a/playbooks/roles/docket/handlers/main.yml +++ b/roles/docket/handlers/main.yml @@ -1,12 +1,12 @@ --- # handlers file for rocknsm.docket -- name: docket | cleanup csr on docket host +- name: Cleanup csr on docket host file: path: "{{docket_x509_key}}.csr" state: absent when: "{{ inventory_hostname in groups['docket'] | bool }}" -- name: docket | cleanup csr on sensor hosts +- name: Cleanup csr on sensor hosts file: path: "{{steno_certs_dir}}/{{hostvars[item].inventory_hostname}}.csr" state: absent @@ -14,20 +14,20 @@ when: - "{{ inventory_hostname in groups['stenographer'] | bool}}" -- name: docket | restart redis +- name: Restart redis service: name: redis state: restarted when: docket_enable | bool -- name: docket | seed random key +- name: Seed random key 
lineinfile: path: /etc/docket/prod.yml regexp: 'XX_NOT_A_SECRET_XX' line: "SECRET_KEY: {{ docket_secret }}" state: present -- name: docket | restart docket celery services +- name: Restart docket celery services service: name: "{{ item }}" state: restarted @@ -36,13 +36,13 @@ - docket-celery-query when: docket_enable | bool -- name: docket | restart docket uwsgi +- name: Restart docket uwsgi service: name: docket state: restarted when: docket_enable | bool -- name: docket | restart lighttpd +- name: Restart lighttpd service: name: lighttpd state: restarted diff --git a/playbooks/roles/bro/meta/main.yml b/roles/docket/meta/main.yml similarity index 100% rename from playbooks/roles/bro/meta/main.yml rename to roles/docket/meta/main.yml diff --git a/playbooks/roles/docket/playbook.yml b/roles/docket/playbook.yml similarity index 100% rename from playbooks/roles/docket/playbook.yml rename to roles/docket/playbook.yml diff --git a/playbooks/roles/docket/tasks/crypto.yml b/roles/docket/tasks/crypto.yml similarity index 67% rename from playbooks/roles/docket/tasks/crypto.yml rename to roles/docket/tasks/crypto.yml index 6044f1ba1..1cc734a4f 100644 --- a/playbooks/roles/docket/tasks/crypto.yml +++ b/roles/docket/tasks/crypto.yml @@ -6,32 +6,32 @@ # Ensure `stenographer` and `nginx` groups exists # Configure docket app settings -- name: docket | ensure ansible_cache dir exists +- name: Ensure ansible_cache dir exists local_action: file path={{ ansible_cache }} state=directory mode=0700 changed_when: false # TODO: this should probably -- name: docket | ensure rock nsm conf dir exists +- name: Ensure rock nsm conf dir exists file: path: "{{ rocknsm_conf_dir }}" state: directory owner: "{{ rocknsm_conf_user }}" group: "{{ rocknsm_conf_group }}" -- name: docket | ensure docket x509 user exists +- name: Ensure docket x509 user exists user: name: "{{ docket_x509_user }}" state: present when: inventory_hostname in groups['docket'] -- name: docket | ensure docket x509 group exists 
+- name: Ensure docket x509 group exists group: name: "{{ docket_x509_group }}" system: yes state: present when: inventory_hostname in groups['docket'] -- name: docket | ensure docket x509 dir exists +- name: Ensure docket x509 dir exists file: path: "{{ docket_x509_dir }}" state: directory @@ -43,13 +43,13 @@ # Generate/copy x509 client cert/keys and CA certs # Use new openssl module in ansible 2.3 -- name: docket | create docket private key +- name: Create docket private key openssl_privatekey: path: "{{docket_x509_key}}" size: 4096 when: inventory_hostname in groups['docket'] -- name: docket | set perms on private key +- name: Set perms on private key file: path: "{{docket_x509_key}}" owner: "{{ docket_x509_user }}" @@ -57,9 +57,9 @@ mode: "0644" when: inventory_hostname in groups['docket'] -- name: docket | check for certificate +- name: Check for certificate stat: - path: "{{docket_x509_dir}}/docket-{{inventory_hostname}}_sensor-{{item}}_cert.pem" + path: "{{docket_x509_dir}}/docket-{{inventory_hostname_short}}_sensor-{{item}}_cert.pem" register: docket_cert changed_when: false with_items: "{{ groups['stenographer'] }}" @@ -68,7 +68,7 @@ - debug: var=docket_cert.results when: inventory_hostname in groups['docket'] -- name: docket | create docket csr +- name: Create docket csr openssl_csr: path: "{{ docket_x509_key }}.csr" privatekey_path: "{{ docket_x509_key }}" @@ -82,10 +82,10 @@ - docket_cert|json_query('results[?stat.exists==`false`]')|length register: new_csr -- name: docket | fetch csr +- name: Fetch csr fetch: src: "{{docket_x509_key}}.csr" - dest: "{{ ansible_cache }}/{{inventory_hostname}}.csr" + dest: "{{ ansible_cache }}/{{inventory_hostname_short}}.csr" flat: yes when: - inventory_hostname in groups['docket'] @@ -99,58 +99,58 @@ - "{{ groups['docket'] }}" - debug: - var: hostvars[item].docket_cert|json_query('results[?item==inventory_hostname]') - #selectattr("item", "equalto", inventory_hostname)|map(attribute="stat.exists")|select("equalto", 
false)|list|length + var: hostvars[item].docket_cert|json_query('results[?item==inventory_hostname_short]') + #selectattr("item", "equalto", inventory_hostname_short)|map(attribute="stat.exists")|select("equalto", false)|list|length with_items: - "{{ groups['docket'] }}" when: - inventory_hostname in groups['stenographer'] -- name: docket | push csr to stenographer hosts +- name: Push csr to stenographer hosts copy: - src: "{{ansible_cache}}/{{hostvars[item].inventory_hostname}}.csr" - dest: "{{steno_certs_dir}}/{{hostvars[item].inventory_hostname}}.csr" + src: "{{ansible_cache}}/{{hostvars[item].inventory_hostname_short}}.csr" + dest: "{{steno_certs_dir}}/{{hostvars[item].inventory_hostname_short}}.csr" with_items: - "{{ groups['docket'] }}" when: - inventory_hostname in groups['stenographer'] - - hostvars[item].docket_cert.results|selectattr("item", "equalto", inventory_hostname)|map(attribute="stat.exists")|select("equalto",false)|list|length + #- hostvars[item].docket_cert.results|selectattr("item", "equalto", inventory_hostname_short)|map(attribute="stat.exists")|select("equalto",false)|list|length -- name: docket | sign certificate signing requests +- name: Sign certificate signing requests openssl_certificate: - path: "{{steno_certs_dir}}/docket-{{hostvars[item].inventory_hostname}}_sensor-{{inventory_hostname}}_cert.pem" - privatekey_path: "{{steno_ca_key}}" - csr_path: "{{steno_certs_dir}}/{{hostvars[item].inventory_hostname}}.csr" - provider: localsigned - cacert_path: "{{steno_ca_cert}}" + path: "{{steno_certs_dir}}/docket-{{hostvars[item].inventory_hostname_short}}_sensor-{{inventory_hostname_short}}_cert.pem" + csr_path: "{{steno_certs_dir}}/{{hostvars[item].inventory_hostname_short}}.csr" + ownca_privatekey_path: "{{steno_ca_key}}" + ownca_path: "{{steno_ca_cert}}" + provider: ownca with_items: "{{ groups['docket'] }}" when: - inventory_hostname in groups['stenographer'] - 
hostvars[item].docket_cert.results|map(attribute="stat.exists")|select("equalto",false)|list|length -- name: docket | pull certificates back +- name: Pull certificates back fetch: - src: "{{steno_certs_dir}}/docket-{{hostvars[item].inventory_hostname}}_sensor-{{inventory_hostname}}_cert.pem" - dest: "{{ansible_cache}}/docket-{{hostvars[item].inventory_hostname}}_sensor-{{inventory_hostname}}_cert.pem" + src: "{{steno_certs_dir}}/docket-{{hostvars[item].inventory_hostname_short}}_sensor-{{inventory_hostname_short}}_cert.pem" + dest: "{{ansible_cache}}/docket-{{hostvars[item].inventory_hostname_short}}_sensor-{{inventory_hostname_short}}_cert.pem" flat: yes with_items: "{{ groups['docket'] }}" when: - inventory_hostname in groups['stenographer'] - hostvars[item].docket_cert.results|map(attribute="stat.exists")|select("equalto",false)|list|length -- name: docket | pull back ca certificates +- name: Pull back ca certificates fetch: src: "{{steno_ca_cert}}" - dest: "{{ansible_cache}}/{{inventory_hostname}}_ca_cert.pem" + dest: "{{ansible_cache}}/{{inventory_hostname_short}}_ca_cert.pem" flat: yes when: - inventory_hostname in groups['stenographer'] changed_when: false -- name: docket | push certificates to docket hosts +- name: Push certificates to docket hosts copy: - src: "{{ansible_cache}}/docket-{{inventory_hostname}}_sensor-{{hostvars[item].inventory_hostname}}_cert.pem" - dest: "{{docket_x509_dir}}/docket-{{inventory_hostname}}_sensor-{{hostvars[item].inventory_hostname}}_cert.pem" + src: "{{ansible_cache}}/docket-{{inventory_hostname_short}}_sensor-{{hostvars[item].inventory_hostname_short}}_cert.pem" + dest: "{{docket_x509_dir}}/docket-{{inventory_hostname_short}}_sensor-{{hostvars[item].inventory_hostname_short}}_cert.pem" owner: "{{ docket_x509_user }}" group: "{{ docket_x509_group }}" mode: "0644" @@ -159,10 +159,10 @@ - inventory_hostname in groups['docket'] - docket_cert.results|map(attribute="stat.exists")|select("equalto",false)|list|length -- name: 
docket | push stenographer ca certs +- name: Push stenographer ca certs copy: - src: "{{ansible_cache}}/{{hostvars[item].inventory_hostname}}_ca_cert.pem" - dest: "{{docket_x509_dir}}/{{hostvars[item].inventory_hostname}}_ca_cert.pem" + src: "{{ansible_cache}}/{{hostvars[item].inventory_hostname_short}}_ca_cert.pem" + dest: "{{docket_x509_dir}}/{{hostvars[item].inventory_hostname_short}}_ca_cert.pem" owner: "{{ docket_x509_user }}" group: "{{ docket_x509_group }}" mode: "0644" @@ -170,7 +170,7 @@ when: - inventory_hostname in groups['docket'] -- name: docket | cleanup {{ ansible_cache }} dir +- name: Cleanup {{ ansible_cache }} dir file: name: "{{ ansible_cache }}/" state: absent diff --git a/playbooks/roles/docket/tasks/docket_config.yml b/roles/docket/tasks/docket_config.yml similarity index 57% rename from playbooks/roles/docket/tasks/docket_config.yml rename to roles/docket/tasks/docket_config.yml index 1cfd57f5a..300bed72f 100644 --- a/playbooks/roles/docket/tasks/docket_config.yml +++ b/roles/docket/tasks/docket_config.yml @@ -1,5 +1,5 @@ --- -- name: docker | check existing secret_key +- name: Check existing secret_key shell: > cat /etc/docket/prod.yaml | awk '/^SECRET_KEY/ {print $2}' register: docket_prod @@ -7,43 +7,43 @@ - debug: msg="{{ docket_prod }}" -- name: docket | keep existing secret_key +- name: Keep existing secret_key set_fact: docket_secret="{{ docket_prod.stdout }}" when: '"CHANGE_THIS" not in docket_prod.stdout' -- name: docket | set production docket config +- name: Set production docket config template: src: docket_prod.yaml.j2 dest: /etc/docket/prod.yaml notify: - - docket | restart docket uwsgi - - docket | restart docket celery services + - Restart docket uwsgi + - Restart docket celery services -- name: docket | set uwsgi config +- name: Set uwsgi config template: src: docket-uwsgi.ini.j2 dest: /etc/docket/docket-uwsgi.ini notify: - - docket | restart docket uwsgi + - Restart docket uwsgi -- name: docket | enable redis +- name: Enable 
redis service: name: redis enabled: yes - notify: docket | restart redis + notify: Restart redis when: docket_enable -- name: docket | enable docket celery services +- name: Enable docket celery services service: name: "{{ item }}" enabled: "{{ docket_enable | bool }}" - notify: docket | restart docket celery services + notify: Restart docket celery services with_items: - docket-celery-io - docket-celery-query -- name: docket | enable docket uwsgi service +- name: Enable docket uwsgi service service: name: docket enabled: "{{ docket_enable | bool }}" - notify: docket | restart docket uwsgi + notify: Restart docket uwsgi diff --git a/roles/docket/tasks/install.yml b/roles/docket/tasks/install.yml new file mode 100644 index 000000000..58db3df78 --- /dev/null +++ b/roles/docket/tasks/install.yml @@ -0,0 +1,29 @@ +--- +- name: Configure RockNSM online repos + yum_repository: + file: rocknsm + name: "{{ item.name }}" + enabled: "{{ rock_online_install }}" + description: "{{ item.name }}" + baseurl: "{{ item.baseurl }}" + repo_gpgcheck: 1 + gpgcheck: "{{ item.gpgcheck }}" + gpgkey: + - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_3 + - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 + sslverify: 1 + sslcacert: /etc/pki/tls/certs/ca-bundle.crt + metadata_expire: 300 + cost: 750 + state: present + with_items: + - { name: "rocknsm_2_3", gpgcheck: yes, baseurl: "{{ rocknsm_baseurl }}" } + - { name: "rocknsm_2_3-source", gpgcheck: no, baseurl: "{{ rocknsm_srpm_baseurl }}" } + when: "{{ docket_install == 'yumrepo' }}" + +- name: Install packages + yum: + name: + - docket + - lighttpd + state: present diff --git a/playbooks/roles/docket/tasks/lighttpd.yml b/roles/docket/tasks/lighttpd.yml similarity index 70% rename from playbooks/roles/docket/tasks/lighttpd.yml rename to roles/docket/tasks/lighttpd.yml index 59d98538c..7d6fd8596 100644 --- a/playbooks/roles/docket/tasks/lighttpd.yml +++ b/roles/docket/tasks/lighttpd.yml @@ -10,13 +10,13 @@ # dest: 
/etc/lighttpd/vhosts.d/docket.conf # notify: docket | restart lighttpd -- name: docket | create lighttpd + uwsgi config +- name: Create lighttpd + uwsgi config template: src: lighttpd-30-docket.conf.j2 dest: /etc/lighttpd/vhosts.d/30-docket.conf - notify: docket | restart lighttpd + notify: Restart lighttpd -- name: docket | create vhost logdir +- name: Create vhost logdir file: state: directory path: "/var/log/lighttpd/{{ docket_web_server_name }}/" @@ -24,22 +24,22 @@ group: lighttpd mode: 0755 -- name: docket | enable lighttpd vhosts +- name: Enable lighttpd vhosts lineinfile: path: /etc/lighttpd/lighttpd.conf regexp: '^#?\s*include.*vhosts\.d/.*$' line: include "/etc/lighttpd/vhosts.d/*.conf" - notify: docket | restart lighttpd + notify: Restart lighttpd -- name: docket | add lighttpd into docket group +- name: Add lighttpd into docket group user: name: lighttpd append: yes groups: "{{ docket_group }}" - notify: docket | restart lighttpd + notify: Restart lighttpd -- name: docket | enable lighttpd service +- name: Enable lighttpd service service: name: lighttpd enabled: yes - notify: docket | restart lighttpd + notify: Restart lighttpd diff --git a/playbooks/roles/docket/tasks/main.yml b/roles/docket/tasks/main.yml similarity index 74% rename from playbooks/roles/docket/tasks/main.yml rename to roles/docket/tasks/main.yml index f6d7b5de2..e81afc84d 100644 --- a/playbooks/roles/docket/tasks/main.yml +++ b/roles/docket/tasks/main.yml @@ -7,14 +7,17 @@ # Install packages - import_tasks: install.yml + when: inventory_hostname in groups['docket'] # Generate/copy x509 client cert/keys and CA certs - import_tasks: crypto.yml # Configure docket app settings - import_tasks: docket_config.yml + when: inventory_hostname in groups['docket'] # Configure web server settings - import_tasks: lighttpd.yml + when: inventory_hostname in groups['docket'] # Enable / Activate Services diff --git a/playbooks/roles/docket/tasks/prereqs.yml b/roles/docket/tasks/prereqs.yml similarity 
index 78% rename from playbooks/roles/docket/tasks/prereqs.yml rename to roles/docket/tasks/prereqs.yml index fa3bfd1a4..857251b8d 100644 --- a/playbooks/roles/docket/tasks/prereqs.yml +++ b/roles/docket/tasks/prereqs.yml @@ -2,19 +2,19 @@ # prepreqs checks for rocknsm.docket # Validate hosts exist in stenographer group -- name: docket | check for docket and stenographer hosts +- name: Check for docket and stenographer hosts assert: that: - "{{ ('docket' in groups) and (groups['docket'] | length) > 0 }}" - "{{ ('stenographer' in groups) and (groups['stenographer'] | length) > 0 }}" msg: "The [docket] and [stenographer] inventory groups must each have at least one host." -- name: docket | check docket and stenographer hosts for pyopenssl +- name: Check docket and stenographer hosts for pyopenssl yum: list=*pyOpenSSL register: pyopenssl_status -- name: docket | validate pyopenssl >= 15.0 is installed +- name: Validate pyopenssl >= 15.0 is installed assert: that: - "{{pyopenssl_status.results|selectattr('yumstate', 'match', 'installed')|map(attribute='version') is version_compare('15.0.0', '>=' )}}" diff --git a/playbooks/roles/docket/templates/docket-uwsgi.ini.j2 b/roles/docket/templates/docket-uwsgi.ini.j2 similarity index 100% rename from playbooks/roles/docket/templates/docket-uwsgi.ini.j2 rename to roles/docket/templates/docket-uwsgi.ini.j2 diff --git a/playbooks/roles/docket/templates/docket_lighttpd_scgi.conf.j2 b/roles/docket/templates/docket_lighttpd_scgi.conf.j2 similarity index 100% rename from playbooks/roles/docket/templates/docket_lighttpd_scgi.conf.j2 rename to roles/docket/templates/docket_lighttpd_scgi.conf.j2 diff --git a/playbooks/roles/docket/templates/docket_lighttpd_vhost.conf.j2 b/roles/docket/templates/docket_lighttpd_vhost.conf.j2 similarity index 100% rename from playbooks/roles/docket/templates/docket_lighttpd_vhost.conf.j2 rename to roles/docket/templates/docket_lighttpd_vhost.conf.j2 diff --git 
a/playbooks/roles/docket/templates/docket_prod.yaml.j2 b/roles/docket/templates/docket_prod.yaml.j2 similarity index 97% rename from playbooks/roles/docket/templates/docket_prod.yaml.j2 rename to roles/docket/templates/docket_prod.yaml.j2 index 54a888e7c..d55e8a9b5 100644 --- a/playbooks/roles/docket/templates/docket_prod.yaml.j2 +++ b/roles/docket/templates/docket_prod.yaml.j2 @@ -25,6 +25,7 @@ SPOOL_DIR: {{ docket_spool_dir }} # http://HOST:PORT/api/stats/ # http://HOST:PORT/api/uri/host/1.2.3.4/ WEB_ROOT: {{ docket_url_apppath }}/api +UI_WEB_ROOT: {{ docket_url_apppath }} # WEB_ROOT the base url for PCAP requests: default is /results # example: # http://HOST:PORT/results//merged.pcap diff --git a/playbooks/roles/docket/templates/lighttpd-30-docket.conf.j2 b/roles/docket/templates/lighttpd-30-docket.conf.j2 similarity index 100% rename from playbooks/roles/docket/templates/lighttpd-30-docket.conf.j2 rename to roles/docket/templates/lighttpd-30-docket.conf.j2 diff --git a/playbooks/roles/docket/test.sh b/roles/docket/test.sh similarity index 100% rename from playbooks/roles/docket/test.sh rename to roles/docket/test.sh diff --git a/playbooks/roles/docket/tests/inventory b/roles/docket/tests/inventory similarity index 100% rename from playbooks/roles/docket/tests/inventory rename to roles/docket/tests/inventory diff --git a/playbooks/roles/docket/tests/test.yml b/roles/docket/tests/test.yml similarity index 100% rename from playbooks/roles/docket/tests/test.yml rename to roles/docket/tests/test.yml diff --git a/playbooks/roles/docket/vars/main.yml b/roles/docket/vars/main.yml similarity index 100% rename from playbooks/roles/docket/vars/main.yml rename to roles/docket/vars/main.yml diff --git a/roles/elasticsearch/defaults/main.yml b/roles/elasticsearch/defaults/main.yml new file mode 100644 index 000000000..80c7dc5d2 --- /dev/null +++ b/roles/elasticsearch/defaults/main.yml @@ -0,0 +1,15 @@ +--- +es_user: elasticsearch +es_group: elasticsearch +es_data_dir: "{{ 
rock_data_dir }}/elasticsearch" +es_cluster_name: rocknsm +es_node_name: "{{ ansible_hostname }}" +es_network_host: "{{ '_site:ipv4_' if ( groups['elasticsearch'] | length ) > 1 else '_local:ipv4_' }}" +es_action_auto_create_index: true +es_min_master_nodes: "{{ 2 if ( groups['es_masters'] | length ) == 3 else 1 }}" +es_mem: "{{ (ansible_memtotal_mb // 1024 // 2) if (ansible_memtotal_mb // 1024) < 64 else 31 }}" +es_url: "http://127.0.0.1:9200" +es_log_dir: /var/log/elasticsearch +es_memlock_override: | + [Service] + LimitMEMLOCK=infinity diff --git a/playbooks/files/es-default-mapping.json b/roles/elasticsearch/files/default-mapping.json similarity index 100% rename from playbooks/files/es-default-mapping.json rename to roles/elasticsearch/files/default-mapping.json diff --git a/roles/elasticsearch/handlers/main.yml b/roles/elasticsearch/handlers/main.yml new file mode 100644 index 000000000..6aa65275c --- /dev/null +++ b/roles/elasticsearch/handlers/main.yml @@ -0,0 +1,9 @@ +--- +- name: Reload systemd + systemd: + daemon_reload: yes + +- name: Restart elasticsearch + service: + name: elasticsearch + state: restarted diff --git a/roles/elasticsearch/tasks/main.yml b/roles/elasticsearch/tasks/main.yml new file mode 100644 index 000000000..fae0a153d --- /dev/null +++ b/roles/elasticsearch/tasks/main.yml @@ -0,0 +1,106 @@ +--- + +- name: Install packages + yum: + name: + - java-1.8.0-openjdk-headless + - elasticsearch + state: installed + +- name: Create elasticsearch directory + file: + path: "{{ es_data_dir }}" + mode: 0755 + owner: "{{ es_user }}" + group: "{{ es_group }}" + state: directory + +- name: Setup elasticsearch config + template: + src: elasticsearch.yml.j2 + dest: /etc/elasticsearch/elasticsearch.yml + owner: root + group: "{{ es_group }}" + mode: 0640 + notify: Restart elasticsearch + +- name: Create elasticsearch systemd override dir + file: + path: /etc/systemd/system/elasticsearch.service.d + owner: root + group: root + mode: 0755 + state: 
directory + notify: + - Reload systemd + - Restart elasticsearch + +- name: Enable elasticsearch memlock in service override + copy: + content: "{{ es_memlock_override }}" + dest: /etc/systemd/system/elasticsearch.service.d/override.conf + mode: 0644 + owner: root + group: root + notify: + - Reload systemd + - Restart elasticsearch + +- name: Setup elasticsearch JVM options + template: + src: templates/es-jvm.options.j2 + dest: /etc/elasticsearch/jvm.options + mode: 0640 + owner: root + group: "{{ es_group }}" + notify: Restart elasticsearch + +- name: Enable and start elasticsearch + service: + name: elasticsearch + state: "started" + enabled: "{{ enable_elasticsearch }}" + +- name: Configure firewall ports + firewalld: + port: "{{ item }}/tcp" + permanent: yes + state: enabled + immediate: yes + when: groups['elasticsearch'] | count > 1 + loop: + - 9200 + - 9300 + +- name: Flush handlers + meta: flush_handlers + +- name: Wait for elasticsearch to become ready + wait_for: + host: "{{ ansible_host }}" + port: 9200 + run_once: true + +- name: Check for default mapping template + uri: + method: "GET" + url: "{{ es_url }}/_template/default" + failed_when: False + register: default_index_template + run_once: true + +- name: Load default elasticsearch mapping template + uri: + method: PUT + url: "{{ es_url }}/_template/default" + body: "{{ lookup('file', 'default-mapping.json')}}" + body_format: json + when: with_elasticsearch and default_index_template.status != 200 + run_once: true + +- name: Blanket install/update elasticsearch mappings + command: ./import-index-templates.sh "{{ es_url }}" + args: + chdir: "{{ rock_module_dir }}/configuration/elasticsearch" + changed_when: false + run_once: true diff --git a/roles/elasticsearch/templates/elasticsearch.yml.j2 b/roles/elasticsearch/templates/elasticsearch.yml.j2 new file mode 100644 index 000000000..e29fedfd0 --- /dev/null +++ b/roles/elasticsearch/templates/elasticsearch.yml.j2 @@ -0,0 +1,36 @@ +{% if es_path_repo is 
defined %} +path.repo: {{ es_path_repo }} +{% endif %} + +cluster.name: {{ es_cluster_name }} +node.name: {{ es_node_name }} +path.data: {{ es_data_dir }} +path.logs: {{ es_log_dir }} +bootstrap.memory_lock: true +network.host: {{ es_network_host }} +discovery.zen.minimum_master_nodes: {{ es_min_master_nodes }} + +{% if groups['elasticsearch'] | length > 1 %} +discovery.zen.ping.unicast.hosts: +{% for host in query('inventory_hostnames', 'es_masters') %} + - {{ host }} +{% endfor %} + +{% if elastic.major_version > 6 %} +cluster.initial_master_nodes: +{% for host in query('inventory_hostnames', 'es_masters') %} + - {{ host }} +{% endfor %} +{% endif %} +{% else %} +discovery.type: single-node + +{% endif %} + +action.auto_create_index: {{ es_action_auto_create_index }} +action.destructive_requires_name: true + +# Node Roles +node.master: {{ node_master }} +node.data: {{ node_data }} +node.ingest: {{ node_ingest }} diff --git a/playbooks/templates/es-jvm.options.j2 b/roles/elasticsearch/templates/es-jvm.options.j2 similarity index 100% rename from playbooks/templates/es-jvm.options.j2 rename to roles/elasticsearch/templates/es-jvm.options.j2 diff --git a/roles/fsf/handlers/main.yml b/roles/fsf/handlers/main.yml new file mode 100644 index 000000000..5e9e14725 --- /dev/null +++ b/roles/fsf/handlers/main.yml @@ -0,0 +1,7 @@ +--- +# handlers file for fsf + +- name: Restart filebeat + systemd: + name: filebeat + state: restarted diff --git a/roles/fsf/tasks/main.yml b/roles/fsf/tasks/main.yml new file mode 100644 index 000000000..8a8112fd9 --- /dev/null +++ b/roles/fsf/tasks/main.yml @@ -0,0 +1,78 @@ +--- + +- name: Install packages + yum: + name: + - fsf + - filebeat + state: present + +- name: Create filebeat config directory + file: + path: /etc/filebeat/configs + mode: 0755 + owner: root + group: root + state: directory + +- name: Add filebeat configuration file + template: + src: "{{ item.src }}" + dest: "/etc/filebeat/{{ item.dest }}" + notify: Restart filebeat + loop:
+ - { src: 'filebeat.yml.j2', dest: 'filebeat.yml' } + - { src: 'fb-fsf.yml.j2', dest: 'configs/fsf.yml' } + +- name: Enable and start filebeat + service: + name: filebeat + state: "{{ 'started' if enable_filebeat else 'stopped' }}" + enabled: "{{ enable_filebeat }}" + +- name: Create FSF data directory + file: + path: "{{ fsf_data_dir }}" + mode: 0755 + owner: "{{ fsf_user }}" + group: "{{ fsf_group }}" + state: directory + setype: var_log_t + +- name: Create FSF archive directory + file: + path: "{{ fsf_archive_dir }}" + mode: 0755 + owner: "{{ fsf_user }}" + group: "{{ fsf_group }}" + state: directory + +- name: Configure logrotate for FSF logs + template: + src: templates/logrotate-fsf.j2 + dest: /etc/logrotate.d/fsf + mode: 0644 + owner: root + group: root + +- name: Configure fsf-server + template: + src: templates/fsf-server-config.j2 + dest: /opt/fsf/fsf-server/conf/config.py + owner: "{{ fsf_user }}" + group: "{{ fsf_group }}" + mode: 0644 + +- name: Configure fsf-client + template: + src: templates/fsf-client-config.j2 + dest: /opt/fsf/fsf-client/conf/config.py + owner: "{{ fsf_user }}" + group: "{{ fsf_group }}" + mode: 0644 + +- name: Enable and start FSF + service: + name: fsf + state: "{{ 'started' if enable_fsf else 'stopped' }}" + enabled: "{{ enable_fsf }}" diff --git a/roles/fsf/templates/fb-fsf.yml.j2 b/roles/fsf/templates/fb-fsf.yml.j2 new file mode 100644 index 000000000..cea7d6d80 --- /dev/null +++ b/roles/fsf/templates/fb-fsf.yml.j2 @@ -0,0 +1,7 @@ +- input_type: log + paths: + - {{ rock_data_dir }}/fsf/rockout.log + json.keys_under_root: true + fields: + kafka_topic: fsf-raw + fields_under_root: true diff --git a/playbooks/templates/fsf-client-config.j2 b/roles/fsf/templates/fsf-client-config.j2 similarity index 100% rename from playbooks/templates/fsf-client-config.j2 rename to roles/fsf/templates/fsf-client-config.j2 diff --git a/playbooks/templates/fsf-server-config.j2 b/roles/fsf/templates/fsf-server-config.j2 similarity index 100% 
rename from playbooks/templates/fsf-server-config.j2 rename to roles/fsf/templates/fsf-server-config.j2 diff --git a/playbooks/templates/logrotate-fsf.j2 b/roles/fsf/templates/logrotate-fsf.j2 similarity index 100% rename from playbooks/templates/logrotate-fsf.j2 rename to roles/fsf/templates/logrotate-fsf.j2 diff --git a/playbooks/roles/kafka/README.md b/roles/kafka/README.md similarity index 100% rename from playbooks/roles/kafka/README.md rename to roles/kafka/README.md diff --git a/playbooks/roles/kafka/defaults/main.yml b/roles/kafka/defaults/main.yml similarity index 100% rename from playbooks/roles/kafka/defaults/main.yml rename to roles/kafka/defaults/main.yml diff --git a/roles/kafka/handlers/main.yml b/roles/kafka/handlers/main.yml new file mode 100644 index 000000000..16d956648 --- /dev/null +++ b/roles/kafka/handlers/main.yml @@ -0,0 +1,29 @@ +--- +# handlers file for kafka + +- name: Create kafka bro topic + command: > + /opt/kafka/bin/kafka-topics.sh + --zookeeper 127.0.0.1:2181 + --create + --replication-factor 1 + --topic bro-raw + --partitions 1 + +- name: Create kafka suricata topic + command: > + /opt/kafka/bin/kafka-topics.sh + --zookeeper 127.0.0.1:2181 + --create + --replication-factor 1 + --topic suricata-raw + --partitions 1 + +- name: Create kafka fsf topic + command: > + /opt/kafka/bin/kafka-topics.sh + --zookeeper 127.0.0.1:2181 + --create + --replication-factor 1 + --topic fsf-raw + --partitions 1 diff --git a/roles/kafka/tasks/main.yml b/roles/kafka/tasks/main.yml new file mode 100644 index 000000000..a07ea81d1 --- /dev/null +++ b/roles/kafka/tasks/main.yml @@ -0,0 +1,48 @@ +--- +# tasks file for kafka + + +- name: Install packages + yum: + name: + - kafka + - kafkacat + state: present + +- name: Create kafka data directory + file: + path: "{{ kafka_data_dir }}" + mode: 0755 + owner: "{{ kafka_user }}" + group: "{{ kafka_group }}" + state: directory + +- name: Set kafka retention + lineinfile: + dest: "{{ kafka_config_path }}" + regexp: 
"log.retention.hours=" + line: "log.retention.hours={{ kafka_retention }}" + state: present + +- name: Set kafka data directory + lineinfile: + dest: "{{ kafka_config_path }}" + regexp: "log.dirs=" + line: "log.dirs={{ kafka_data_dir }}" + +- name: Enable and start kafka + service: + name: kafka + state: "{{ 'started' if enable_kafka else 'stopped' }}" + enabled: "{{ enable_kafka }}" + + +- name: Configure firewall ports + firewalld: + port: "{{ item }}/tcp" + permanent: yes + state: enabled + immediate: yes + loop: + - 9092 + when: groups['kafka'] | difference(groups['logstash']) | count > 0 diff --git a/playbooks/roles/kafka/tests/inventory b/roles/kafka/tests/inventory similarity index 100% rename from playbooks/roles/kafka/tests/inventory rename to roles/kafka/tests/inventory diff --git a/playbooks/roles/kafka/tests/test.yml b/roles/kafka/tests/test.yml similarity index 100% rename from playbooks/roles/kafka/tests/test.yml rename to roles/kafka/tests/test.yml diff --git a/playbooks/roles/kafka/vars/main.yml b/roles/kafka/vars/main.yml similarity index 100% rename from playbooks/roles/kafka/vars/main.yml rename to roles/kafka/vars/main.yml diff --git a/roles/kibana/files/profile.d-kibanapw.sh b/roles/kibana/files/profile.d-kibanapw.sh new file mode 100644 index 000000000..31bd4a0cf --- /dev/null +++ b/roles/kibana/files/profile.d-kibanapw.sh @@ -0,0 +1,2 @@ +# Set passwords +function kibanapw() { if [ $# -lt 2 ]; then echo -e "Usage: kibanapw USER PASSWORD\nUsers will be added to /etc/nginx/htpasswd.users"; else egrep "^${1}:" /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1; if [[ $? 
-eq 0 ]]; then sudo sed -i "/${1}\:/d" /etc/lighttpd/rock-htpasswd.user; fi; printf "${1}:$(echo ${2} | openssl passwd -apr1 -stdin)\n" | sudo tee -a /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1; fi; } diff --git a/roles/kibana/handlers/main.yml b/roles/kibana/handlers/main.yml new file mode 100644 index 000000000..7498487cf --- /dev/null +++ b/roles/kibana/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Restart kibana + service: + name: kibana + state: restarted diff --git a/roles/kibana/tasks/main.yml b/roles/kibana/tasks/main.yml new file mode 100644 index 000000000..1f5cfdab6 --- /dev/null +++ b/roles/kibana/tasks/main.yml @@ -0,0 +1,86 @@ +--- + +- name: Install packages + yum: + name: kibana + state: present + +- name: Update kibana config + template: + src: kibana.yml.j2 + dest: /etc/kibana/kibana.yml + notify: Restart kibana + +- name: Flush handlers + meta: flush_handlers + +- name: Enable and start kibana + service: + name: kibana + state: "started" + enabled: "{{ enable_kibana }}" + +- name: "Wait for Kibana to be available" + uri: + url: "{{kibana_url}}/api/kibana/settings" + status_code: 200 + register: result + until: result.status == 200 + retries: 60 + delay: 1 + +- name: Blanket install/update kibana saved objects + command: ./import-saved-items.sh "{{ kibana_url }}" + args: + chdir: "{{rock_module_dir}}/configuration/kibana" + changed_when: false + # TODO: Fix this ^^ + +- name: Configure kibana templates + uri: + method: PUT + url: "{{ es_url }}/_template/kibana-config" + body: > + { "order" : 0, "template" : ".kibana", + "settings" : + { "index.number_of_replicas" : "0", + "index.number_of_shards" : "1" }, + "mappings" : { }, "aliases" : { } } + body_format: json + status_code: 200,201 + +- name: Add the kibanapw shell function + copy: + src: profile.d-kibanapw.sh + dest: /etc/profile.d/kibanapw.sh + mode: 0644 + owner: root + group: root + +- name: Set initial kibana credentials + shell: > + export kibuser=$(getent passwd 1000 | awk -F: 
'{print $1}') && \ + export kibpw=$(xkcdpass -a rock) && \ + echo -e "U: ${kibuser}\nP: ${kibpw}" > /home/${kibuser}/KIBANA_CREDS.README && \ + printf "${kibuser}:$(echo ${kibpw} | openssl passwd -apr1 -stdin)\n" | \ + sudo tee -a /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1 + args: + creates: /etc/lighttpd/rock-htpasswd.user + when: with_lighttpd + +- name: Download RockNSM elastic configs + get_url: + url: "{{ rock_dashboards_url }}" + dest: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" + mode: 0644 + when: (with_elasticsearch or with_logstash) and rock_online_install + +- name: Extract RockNSM elastic configs + unarchive: + src: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" + dest: /opt/rocknsm + owner: root + group: root + creates: "{{ rock_module_dir }}" + remote_src: yes + when: (with_elasticsearch or with_logstash) diff --git a/roles/kibana/templates/kibana.yml.j2 b/roles/kibana/templates/kibana.yml.j2 new file mode 100644 index 000000000..39c79131b --- /dev/null +++ b/roles/kibana/templates/kibana.yml.j2 @@ -0,0 +1,3 @@ +server.port: {{ kibana_port }} +server.name: "{{ ansible_hostname }}" +elasticsearch.url: "{{ es_url }}" diff --git a/roles/lighttpd/handlers/main.yml b/roles/lighttpd/handlers/main.yml new file mode 100644 index 000000000..5f0ec0b69 --- /dev/null +++ b/roles/lighttpd/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Enable and restart lighttpd + systemd: + name: lighttpd + state: "{{ 'restarted' if enable_lighttpd else 'stopped' }}" + enabled: "{{ enable_lighttpd }}" diff --git a/roles/lighttpd/tasks/main.yml b/roles/lighttpd/tasks/main.yml new file mode 100644 index 000000000..fd8e77c15 --- /dev/null +++ b/roles/lighttpd/tasks/main.yml @@ -0,0 +1,97 @@ +--- + +- name: Install packages + yum: + name: + - lighttpd + - python2-xkcdpass + state: present + +- name: Install ROCK lighttpd configuration + template: + src: templates/{{ item }}.j2 + dest: /etc/lighttpd/vhosts.d/{{ item }} + mode: 0644 + owner: root + group: root 
+ when: with_kibana + with_items: + - 10-rock-auth.conf + - 10-tls.conf + - 20-rock-vars.conf + - 50-rockproxy.conf + notify: Enable and restart lighttpd + +- name: Enable lighttpd vhosts + lineinfile: + path: /etc/lighttpd/lighttpd.conf + regexp: '^#?\s*include.*vhosts\.d/.*$' + line: include "/etc/lighttpd/vhosts.d/*.conf" + notify: Enable and restart lighttpd + +- name: Enable lighttpd to perform proxy connect + seboolean: + name: httpd_can_network_connect + state: yes + persistent: yes + when: with_kibana + +- name: Generate sensor private key + openssl_privatekey: + path: "{{ http_tls_key }}" + when: with_kibana + notify: Enable and restart lighttpd + +- name: Generate sensor public key + openssl_publickey: + path: "{{ http_tls_pub }}" + privatekey_path: "{{ http_tls_key }}" + when: with_kibana + notify: Enable and restart lighttpd + +- name: Generate sensor CSR + openssl_csr: + path: "{{ http_tls_pub }}.csr" + privatekey_path: "{{ http_tls_key }}" + country_name: US + state_or_province_name: MO + locality_name: St. 
Louis + organization_name: RockNSM + organizational_unit_name: NSM Ninjas + email_address: info@rocknsm.io + common_name: "{{ ansible_hostname }}" + when: with_kibana + notify: Enable and restart lighttpd + +- name: Generate sensor certificate + openssl_certificate: + path: "{{ http_tls_crt }}" + privatekey_path: "{{ http_tls_key }}" + csr_path: "{{ http_tls_pub }}.csr" + provider: selfsigned + when: with_kibana + notify: Enable and restart lighttpd + +- name: Combine sensor cert and key + shell: > + cat {{http_tls_key}} {{http_tls_crt}} > {{http_tls_combined}} + args: + creates: "{{ http_tls_combined }}" + notify: Enable and restart lighttpd + +- name: Generate DH parameters + command: > + openssl dhparam -out {{http_tls_dhparams}} 2048 + args: + creates: "{{http_tls_dhparams}}" + when: with_kibana + notify: Enable and restart lighttpd + +- name: Configure firewall ports + firewalld: + port: "{{ item }}/tcp" + permanent: yes + state: enabled + immediate: yes + loop: + - 443 diff --git a/playbooks/templates/lighttpd-10-rock-auth.conf.j2 b/roles/lighttpd/templates/10-rock-auth.conf.j2 similarity index 100% rename from playbooks/templates/lighttpd-10-rock-auth.conf.j2 rename to roles/lighttpd/templates/10-rock-auth.conf.j2 diff --git a/playbooks/templates/lighttpd-10-tls.conf.j2 b/roles/lighttpd/templates/10-tls.conf.j2 similarity index 100% rename from playbooks/templates/lighttpd-10-tls.conf.j2 rename to roles/lighttpd/templates/10-tls.conf.j2 diff --git a/playbooks/templates/lighttpd-20-rock-vars.conf.j2 b/roles/lighttpd/templates/20-rock-vars.conf.j2 similarity index 100% rename from playbooks/templates/lighttpd-20-rock-vars.conf.j2 rename to roles/lighttpd/templates/20-rock-vars.conf.j2 diff --git a/playbooks/templates/lighttpd-50-rockproxy.conf.j2 b/roles/lighttpd/templates/50-rockproxy.conf.j2 similarity index 100% rename from playbooks/templates/lighttpd-50-rockproxy.conf.j2 rename to roles/lighttpd/templates/50-rockproxy.conf.j2 diff --git 
a/roles/logstash/handlers/main.yml b/roles/logstash/handlers/main.yml new file mode 100644 index 000000000..026edc0f6 --- /dev/null +++ b/roles/logstash/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Restart logstash + systemd: + name: logstash + state: restarted diff --git a/roles/logstash/tasks/main.yml b/roles/logstash/tasks/main.yml new file mode 100644 index 000000000..8d767c12e --- /dev/null +++ b/roles/logstash/tasks/main.yml @@ -0,0 +1,115 @@ +--- + +- name: Install packages + yum: + name: + - java-1.8.0-openjdk-headless + - logstash + state: present + +- name: Add bro input/output for logstash + template: + src: "{{ item }}.j2" + dest: "/etc/logstash/conf.d/{{ item }}.conf" + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + mode: 0640 + when: with_bro and with_kafka + notify: Restart logstash + with_items: + - logstash-100-input-kafka-bro + - logstash-999-output-es-bro + +- name: Install bro-kafka filter for logstash + copy: + src: "{{rock_module_dir}}/configuration/logstash/{{item}}" + dest: "/etc/logstash/conf.d/{{item}}" + mode: 0640 + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + remote_src: "yes" + when: with_bro and with_kafka + notify: Restart logstash + with_items: + - logstash-500-filter-bro.conf + +- name: Add suricata input/output for logstash + template: + src: "{{ item }}.j2" + dest: "/etc/logstash/conf.d/{{ item }}.conf" + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + mode: 0640 + when: with_suricata and with_kafka + notify: Restart logstash + with_items: + - logstash-100-input-kafka-suricata + - logstash-999-output-es-suricata + +- name: Install suricata-kafka filter for logstash + copy: + src: "{{rock_module_dir}}/configuration/logstash/{{item}}" + dest: "/etc/logstash/conf.d/{{item}}" + mode: 0640 + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + remote_src: "yes" + when: with_suricata and with_kafka + notify: Restart logstash + with_items: + - 
logstash-500-filter-suricata.conf + +- name: Add fsf input/output for logstash + template: + src: "{{ item }}.j2" + dest: "/etc/logstash/conf.d/{{ item }}.conf" + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + mode: 0640 + when: with_fsf and with_kafka + notify: Restart logstash + with_items: + - logstash-100-input-kafka-fsf + - logstash-999-output-es-fsf + +- name: Install fsf-kafka filter for logstash + copy: + src: "{{rock_module_dir}}/configuration/logstash/{{item}}" + dest: "/etc/logstash/conf.d/{{item}}" + mode: 0640 + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + remote_src: "yes" + when: with_fsf and with_kafka + notify: Restart logstash + with_items: + - logstash-500-filter-fsf.conf + +- name: Add parse failure input/output for logstash + template: + src: "{{ item }}.j2" + dest: "/etc/logstash/conf.d/{{ item }}.conf" + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + mode: 0640 + notify: Restart logstash + with_items: + - logstash-999-output-es-parsefailures + +- name: Install parse failure configuration for logstash + copy: + src: "{{rock_module_dir}}/configuration/logstash/{{item}}" + dest: "/etc/logstash/conf.d/{{item}}" + mode: 0640 + owner: "{{ logstash_user }}" + group: "{{ logstash_group }}" + remote_src: "yes" + notify: Restart logstash + with_items: + - logstash-998-filter-parsefailures.conf + +- name: Enable and start logstash + service: + name: logstash + state: "{{ 'started' if enable_logstash else 'stopped' }}" + enabled: "{{ enable_logstash }}" diff --git a/playbooks/files/logstash-100-input-kafka-bro.conf b/roles/logstash/templates/logstash-100-input-kafka-bro.j2 similarity index 69% rename from playbooks/files/logstash-100-input-kafka-bro.conf rename to roles/logstash/templates/logstash-100-input-kafka-bro.j2 index 230d9ffba..ae180fb82 100644 --- a/playbooks/files/logstash-100-input-kafka-bro.conf +++ b/roles/logstash/templates/logstash-100-input-kafka-bro.j2 @@ -5,7 +5,7 @@ input { # Set this 
to one per kafka partition to scale up #consumer_threads => 4 group_id => "bro_logstash" - bootstrap_servers => "127.0.0.1:9092" + bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} codec => json auto_offset_reset => "earliest" } diff --git a/playbooks/files/logstash-100-input-kafka-fsf.conf b/roles/logstash/templates/logstash-100-input-kafka-fsf.j2 similarity index 69% rename from playbooks/files/logstash-100-input-kafka-fsf.conf rename to roles/logstash/templates/logstash-100-input-kafka-fsf.j2 index e7c797a3f..3067eb312 100644 --- a/playbooks/files/logstash-100-input-kafka-fsf.conf +++ b/roles/logstash/templates/logstash-100-input-kafka-fsf.j2 @@ -5,7 +5,7 @@ input { # Set this to one per kafka partition to scale up #consumer_threads => 4 group_id => "fsf_logstash" - bootstrap_servers => "127.0.0.1:9092" + bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} codec => json auto_offset_reset => "earliest" } diff --git a/playbooks/files/logstash-100-input-kafka-suricata.conf b/roles/logstash/templates/logstash-100-input-kafka-suricata.j2 similarity index 70% rename from playbooks/files/logstash-100-input-kafka-suricata.conf rename to roles/logstash/templates/logstash-100-input-kafka-suricata.j2 index 98aa319a3..46e09b2df 100644 --- a/playbooks/files/logstash-100-input-kafka-suricata.conf +++ b/roles/logstash/templates/logstash-100-input-kafka-suricata.j2 @@ -5,7 +5,7 @@ input { # Set this to one per kafka partition to scale up #consumer_threads => 4 group_id => "suricata_logstash" - bootstrap_servers => "127.0.0.1:9092" + bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} codec => json auto_offset_reset => "earliest" } diff --git a/roles/logstash/templates/logstash-999-output-es-bro.j2 b/roles/logstash/templates/logstash-999-output-es-bro.j2 new file mode 100644 index 
000000000..b0e2c591f --- /dev/null +++ b/roles/logstash/templates/logstash-999-output-es-bro.j2 @@ -0,0 +1,21 @@ +output { + if [@metadata][stage] == "broraw_kafka" { + kafka { + codec => json + topic_id => "bro-%{[@meta][event_type]}" + bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} + } + + elasticsearch { + {% if groups['elasticsearch'] | length > 1 %} + hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] + {% else %} + hosts => ["127.0.0.1:9200"] + {% endif %} + index => "bro-%{[@meta][event_type]}-%{+YYYY.MM.dd}" + manage_template => false + #template => "/opt/rocknsm/rock/playbooks/files/es-bro-mappings.json" + document_type => "_doc" + } + } +} diff --git a/roles/logstash/templates/logstash-999-output-es-fsf.j2 b/roles/logstash/templates/logstash-999-output-es-fsf.j2 new file mode 100644 index 000000000..67fa13c7f --- /dev/null +++ b/roles/logstash/templates/logstash-999-output-es-fsf.j2 @@ -0,0 +1,20 @@ +output { + if [@metadata][stage] == "fsfraw_kafka" { + kafka { + codec => json + topic_id => "fsf-clean" + bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} + } + + elasticsearch { + {% if groups['elasticsearch'] | length > 1 %} + hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] + {% else %} + hosts => ["127.0.0.1:9200"] + {% endif %} + index => "fsf-%{+YYYY.MM.dd}" + manage_template => false + document_type => "_doc" + } + } +} diff --git a/roles/logstash/templates/logstash-999-output-es-parsefailures.j2 b/roles/logstash/templates/logstash-999-output-es-parsefailures.j2 new file mode 100644 index 000000000..22dd38d26 --- /dev/null +++ b/roles/logstash/templates/logstash-999-output-es-parsefailures.j2 @@ -0,0 +1,13 @@ +output { + if [@metadata][stage] == "_parsefailure" { + elasticsearch { + {% if 
groups['elasticsearch'] | length > 1 %} + hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] + {% else %} + hosts => ["127.0.0.1:9200"] + {% endif %} + index => "parse-failures-%{+YYYY.MM.dd}" + document_type => "_doc" + } + } +} diff --git a/roles/logstash/templates/logstash-999-output-es-suricata.j2 b/roles/logstash/templates/logstash-999-output-es-suricata.j2 new file mode 100644 index 000000000..ea0df6634 --- /dev/null +++ b/roles/logstash/templates/logstash-999-output-es-suricata.j2 @@ -0,0 +1,20 @@ +output { + if [@metadata][stage] == "suricataraw_kafka" { + kafka { + codec => json + topic_id => "suricata-clean" + bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} + } + + elasticsearch { + {% if groups['elasticsearch'] | length > 1 %} + hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] + {% else %} + hosts => ["127.0.0.1:9200"] + {% endif %} + index => "suricata-%{+YYYY.MM.dd}" + manage_template => false + document_type => "_doc" + } + } +} diff --git a/roles/logstash/templates/logstash_sysconfig.j2 b/roles/logstash/templates/logstash_sysconfig.j2 new file mode 100644 index 000000000..18ef7adea --- /dev/null +++ b/roles/logstash/templates/logstash_sysconfig.j2 @@ -0,0 +1,2 @@ +bootstrap_servers={% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} +elasticsearch_hosts={% for host in groups['es_data'] %}"{{ host }}:9200"{% if not loop.last %},{% endif %}{% endfor %} diff --git a/playbooks/roles/sensor-defaults/defaults/main.yml b/roles/sensor-defaults/defaults/main.yml similarity index 97% rename from playbooks/roles/sensor-defaults/defaults/main.yml rename to roles/sensor-defaults/defaults/main.yml index 98ededac3..01d06c4a5 100644 --- a/playbooks/roles/sensor-defaults/defaults/main.yml +++ b/roles/sensor-defaults/defaults/main.yml @@ -8,8 +8,6 
@@ rocknsm_dir: /opt/rocknsm rock_data_user: root rock_data_group: root rock_monifs: "{{ ansible_interfaces | difference(['lo', ansible_default_ipv4.interface | default('lo') ])| list }}" -rock_hostname: "{{ inventory_hostname_short }}" -rock_fqdn: "{{ inventory_hostname }}" rock_mgmt_nets: [ "0.0.0.0/0" ] rock_cache_dir: /srv/rocknsm/support pulledpork_rules: @@ -143,7 +141,7 @@ es_group: elasticsearch es_data_dir: "{{ rock_data_dir }}/elasticsearch" es_log_dir: /var/log/elasticsearch es_cluster_name: rocknsm -es_node_name: "{{ rock_hostname }}" +es_node_name: "{{ ansible_hostname }}" es_mem: "{{ (ansible_memtotal_mb // 1024 // 2) if (ansible_memtotal_mb // 1024) < 64 else 31 }}" es_url: "http://localhost:9200" es_memlock_override: | diff --git a/playbooks/roles/stenographer/README.md b/roles/stenographer/README.md similarity index 100% rename from playbooks/roles/stenographer/README.md rename to roles/stenographer/README.md diff --git a/playbooks/roles/stenographer/defaults/main.yml b/roles/stenographer/defaults/main.yml similarity index 100% rename from playbooks/roles/stenographer/defaults/main.yml rename to roles/stenographer/defaults/main.yml diff --git a/playbooks/files/stenographer.service b/roles/stenographer/files/stenographer.service similarity index 100% rename from playbooks/files/stenographer.service rename to roles/stenographer/files/stenographer.service diff --git a/playbooks/files/stenographer@.service b/roles/stenographer/files/stenographer@.service similarity index 100% rename from playbooks/files/stenographer@.service rename to roles/stenographer/files/stenographer@.service diff --git a/playbooks/roles/stenographer/handlers/main.yml b/roles/stenographer/handlers/main.yml similarity index 56% rename from playbooks/roles/stenographer/handlers/main.yml rename to roles/stenographer/handlers/main.yml index 9e6f6f1dc..273670403 100644 --- a/playbooks/roles/stenographer/handlers/main.yml +++ b/roles/stenographer/handlers/main.yml @@ -1,13 +1,19 @@ --- 
# handlers file for stenographer -- name: start stenographer service +- name: Start stenographer service service: name: stenographer state: "{{ 'started' if enable_stenographer else 'stopped' }}" -- name: start stenographer per interface +- name: Start stenographer per interface service: name: "stenographer@{{ item }}" state: "{{ 'started' if enable_stenographer else 'stopped' }}" with_items: "{{ stenographer_monitor_interfaces }}" + +- name: Restart stenographer per interface + service: + name: "stenographer@{{ item }}" + state: restarted + with_items: "{{ stenographer_monitor_interfaces }}" diff --git a/playbooks/roles/stenographer/tasks/config.yml b/roles/stenographer/tasks/config.yml similarity index 65% rename from playbooks/roles/stenographer/tasks/config.yml rename to roles/stenographer/tasks/config.yml index 99445a6ad..708766127 100644 --- a/playbooks/roles/stenographer/tasks/config.yml +++ b/roles/stenographer/tasks/config.yml @@ -3,13 +3,15 @@ ###################################################### ################# Config Stenographer ################# ###################################################### + - name: Set stenographer config template: - src: templates/stenographer-config.j2 + src: stenographer-config.j2 dest: "/etc/stenographer/config.{{ item.1 }}" with_indexed_items: "{{ stenographer_monitor_interfaces }}" + notify: Restart stenographer per interface -- name: Create Stenographer directories +- name: Create stenographer directories file: path: "{{ stenographer_data_dir }}/{{ item[0] }}/{{ item[1] }}" mode: 0755 @@ -34,19 +36,32 @@ - name: Generate stenographer keys command: > /usr/bin/stenokeys.sh {{ stenographer_user }} {{ stenographer_group }} + environment: + STENOGRAPHER_CONFIG: "/etc/stenographer/config.{{ stenographer_monitor_interfaces[0] }}" args: creates: /etc/stenographer/certs/client_key.pem -- name: Configure Stenographer service +- name: Configure stenographer service service: name: stenographer enabled: "{{ enable_stenographer 
}}" - notify: start stenographer service + notify: Start stenographer service -- name: configure stenographer per interface +- name: Configure stenographer per interface service: name: "stenographer@{{ item }}" enabled: "{{ enable_stenographer }}" with_items: "{{ stenographer_monitor_interfaces }}" - notify: start stenographer per interface + notify: Start stenographer per interface + +- name: Configure firewall ports + firewalld: + port: "{{ 1234 + index }}/tcp" + permanent: yes + state: enabled + immediate: yes + loop: "{{ stenographer_monitor_interfaces }}" + loop_control: + index_var: index + when: groups['stenographer'] | difference(groups['docket']) | count > 0 ... diff --git a/playbooks/roles/stenographer/tasks/deploy.yml b/roles/stenographer/tasks/deploy.yml similarity index 67% rename from playbooks/roles/stenographer/tasks/deploy.yml rename to roles/stenographer/tasks/deploy.yml index aec575e95..948357b99 100644 --- a/playbooks/roles/stenographer/tasks/deploy.yml +++ b/roles/stenographer/tasks/deploy.yml @@ -1,5 +1,4 @@ --- -- import_tasks: prechecks.yml - import_tasks: install.yml - import_tasks: config.yml ... 
diff --git a/playbooks/roles/stenographer/tasks/install.yml b/roles/stenographer/tasks/install.yml similarity index 100% rename from playbooks/roles/stenographer/tasks/install.yml rename to roles/stenographer/tasks/install.yml diff --git a/playbooks/roles/stenographer/tasks/main.yml b/roles/stenographer/tasks/main.yml similarity index 100% rename from playbooks/roles/stenographer/tasks/main.yml rename to roles/stenographer/tasks/main.yml diff --git a/playbooks/templates/stenographer-config.j2 b/roles/stenographer/templates/stenographer-config.j2 similarity index 89% rename from playbooks/templates/stenographer-config.j2 rename to roles/stenographer/templates/stenographer-config.j2 index b3d7010f3..fef5bf1d2 100644 --- a/playbooks/templates/stenographer-config.j2 +++ b/roles/stenographer/templates/stenographer-config.j2 @@ -10,7 +10,7 @@ , "StenotypePath": "/usr/bin/stenotype" , "Interface": "{{ item.1 }}" , "Port": {{ 1234 + item.0 }} - , "Host": "127.0.0.1" + , "Host": "{{ ansible_default_ipv4.address }}" , "Flags": ["-v"] , "CertPath": "/etc/stenographer/certs" } diff --git a/playbooks/roles/stenographer/tests/inventory b/roles/stenographer/tests/inventory similarity index 100% rename from playbooks/roles/stenographer/tests/inventory rename to roles/stenographer/tests/inventory diff --git a/playbooks/roles/stenographer/tests/test.yml b/roles/stenographer/tests/test.yml similarity index 100% rename from playbooks/roles/stenographer/tests/test.yml rename to roles/stenographer/tests/test.yml diff --git a/playbooks/roles/stenographer/vars/main.yml b/roles/stenographer/vars/main.yml similarity index 100% rename from playbooks/roles/stenographer/vars/main.yml rename to roles/stenographer/vars/main.yml diff --git a/playbooks/roles/suricata/README.md b/roles/suricata/README.md similarity index 100% rename from playbooks/roles/suricata/README.md rename to roles/suricata/README.md diff --git a/playbooks/roles/suricata/defaults/main.yml b/roles/suricata/defaults/main.yml 
similarity index 100% rename from playbooks/roles/suricata/defaults/main.yml rename to roles/suricata/defaults/main.yml diff --git a/playbooks/files/suricata.service b/roles/suricata/files/suricata.service similarity index 100% rename from playbooks/files/suricata.service rename to roles/suricata/files/suricata.service diff --git a/playbooks/files/suricata.tmpfiles b/roles/suricata/files/suricata.tmpfiles similarity index 100% rename from playbooks/files/suricata.tmpfiles rename to roles/suricata/files/suricata.tmpfiles diff --git a/playbooks/files/suricata.yaml b/roles/suricata/files/suricata.yaml similarity index 100% rename from playbooks/files/suricata.yaml rename to roles/suricata/files/suricata.yaml diff --git a/roles/suricata/handlers/main.yml b/roles/suricata/handlers/main.yml new file mode 100644 index 000000000..31d8afa44 --- /dev/null +++ b/roles/suricata/handlers/main.yml @@ -0,0 +1,13 @@ +--- +# handlers file for suricata + +- name: Restart filebeat + systemd: + name: filebeat + state: restarted + +- name: Configure monitor interfaces + shell: > + for intf in {{ rock_monifs | join(' ') }}; do + /sbin/ifup ${intf}; + done diff --git a/roles/suricata/tasks/main.yml b/roles/suricata/tasks/main.yml new file mode 100644 index 000000000..0e304fb04 --- /dev/null +++ b/roles/suricata/tasks/main.yml @@ -0,0 +1,182 @@ +--- +# handlers file for suricata + +- name: Install packages + yum: + name: + - suricata + - suricata-update + - filebeat + state: present + +- name: Set monitor interface config + template: + src: templates/ifcfg-monif.j2 + dest: /etc/sysconfig/network-scripts/ifcfg-{{ item }} + mode: 0644 + owner: root + group: root + force: yes + with_items: "{{ rock_monifs }}" + +- name: Configure local ifup script + template: + src: templates/ifup-local.j2 + dest: /sbin/ifup-local + mode: 0755 + owner: root + group: root + force: yes + notify: Configure monitor interfaces + +- name: Create filebeat config directory + file: + path: /etc/filebeat/configs + 
mode: 0755 + owner: root + group: root + state: directory + +- name: Add filebeat configuration file + template: + src: "{{ item.src }}" + dest: "/etc/filebeat/{{ item.dest }}" + notify: Restart filebeat + loop: + - { src: 'filebeat.yml.j2', dest: 'filebeat.yml' } + - { src: 'fb-suricata.yml.j2', dest: 'configs/suricata.yml' } + +- name: Enable and start filebeat + service: + name: filebeat + state: "{{ 'started' if enable_filebeat else 'stopped' }}" + enabled: "{{ enable_filebeat }}" + +- name: Create suricata data directory + file: + path: "{{ suricata_data_dir }}/" + mode: 0755 + owner: "{{ suricata_user }}" + group: "{{ suricata_group }}" + state: directory + setype: var_log_t + +- name: Remove suricata sysconfig file + file: + path: /etc/sysconfig/suricata + state: absent + +- name: Install suricata service files + copy: + src: "suricata.service" + dest: "/etc/systemd/system/suricata.service" + mode: 0644 + owner: root + group: root + +- name: Setup suricata tmpfiles + copy: + src: "suricata.tmpfiles" + dest: "/etc/tmpfiles.d/suricata.conf" + mode: 0644 + owner: root + group: root + +- name: Install suricata overrides + template: + src: templates/suricata_overrides.yaml.j2 + dest: /etc/suricata/rocknsm-overrides.yaml + mode: 0640 + owner: "root" + group: "{{ suricata_group }}" + +- name: Create IP reputation config directory + file: + path: /etc/suricata/rules/iplists + state: directory + owner: root + group: root + mode: 0755 + +- name: Create directories for suricata-update + file: + path: "{{ suricata_var_dir }}/{{ item }}" + state: directory + owner: "{{ suricata_user }}" + group: "{{ suricata_group }}" + mode: 0755 + recurse: "yes" + with_items: + - rules + - update + +- name: Set suricata overrides include + lineinfile: + dest: /etc/suricata/suricata.yaml + line: "include: rocknsm-overrides.yaml" + state: present + +- name: Enable and start suricata + service: + name: suricata + enabled: "{{ enable_suricata }}" + state: "{{ 'started' if enable_suricata 
else 'stopped' }}" + +- name: Configure logrotate for suricata logs + template: + src: templates/logrotate-suricata.j2 + dest: /etc/logrotate.d/suricata + mode: 0644 + owner: root + group: root + +- name: Create local rules source for offline install of suricata + command: /usr/bin/suricata-update add-source "emerging-threats-offline" "file:///srv/rocknsm/support/emerging.rules-suricata.tar.gz" + args: + creates: /var/lib/suricata/update/sources/emerging-threats-offline.yaml + when: with_suricata_update and not rock_online_install + become: yes + become_user: "{{ suricata_user }}" + +- name: Offline install of suricata rules + command: /usr/bin/suricata-update update --reload-command "/usr/bin/systemctl kill -s USR2 suricata" + args: + creates: /var/lib/suricata/rules/suricata.rules + when: enable_suricata_update and not rock_online_install + become: yes + become_user: "{{ suricata_user }}" + +- name: Update suricata-update source index + command: /usr/bin/suricata-update update-sources + args: + creates: /var/lib/suricata/update/cache/index.yaml + when: enable_suricata_update and rock_online_install + become: yes + become_user: "{{ suricata_user }}" + +- name: Explicitly enable ET rules for suricata-update online + command: /usr/bin/suricata-update enable-source et/open + args: + creates: /var/lib/suricata/update/sources/et-open.yaml + when: enable_suricata_update and rock_online_install + become: yes + become_user: "{{ suricata_user }}" + +- name: Suricata-update online rules pull + command: /usr/bin/suricata-update update --reload-command "/usr/bin/systemctl kill -s USR2 suricata" + args: + creates: /var/lib/suricata/rules/suricata.rules + when: enable_suricata_update and rock_online_install + become: yes + become_user: "{{ suricata_user }}" + +- name: Cron for suricata-update + cron: + name: "suricata-update" + cron_file: rocknsm_suricata-update + user: "{{ suricata_user }}" + hour: "12" + minute: "0" + job: /usr/bin/suricata-update update --reload-command 
"/usr/bin/systemctl kill -s USR2 suricata" + > /var/log/suricata-update.log 2>&1 + when: enable_suricata_update diff --git a/roles/suricata/templates/fb-suricata.yml.j2 b/roles/suricata/templates/fb-suricata.yml.j2 new file mode 100644 index 000000000..143882604 --- /dev/null +++ b/roles/suricata/templates/fb-suricata.yml.j2 @@ -0,0 +1,7 @@ +- input_type: log + paths: + - {{ rock_data_dir }}/suricata/eve.json + json.keys_under_root: true + fields: + kafka_topic: suricata-raw + fields_under_root: true diff --git a/playbooks/templates/logrotate-suricata.j2 b/roles/suricata/templates/logrotate-suricata.j2 similarity index 100% rename from playbooks/templates/logrotate-suricata.j2 rename to roles/suricata/templates/logrotate-suricata.j2 diff --git a/playbooks/templates/suricata_overrides.yaml.j2 b/roles/suricata/templates/suricata_overrides.yaml.j2 similarity index 100% rename from playbooks/templates/suricata_overrides.yaml.j2 rename to roles/suricata/templates/suricata_overrides.yaml.j2 diff --git a/playbooks/roles/suricata/tests/inventory b/roles/suricata/tests/inventory similarity index 100% rename from playbooks/roles/suricata/tests/inventory rename to roles/suricata/tests/inventory diff --git a/playbooks/roles/suricata/tests/test.yml b/roles/suricata/tests/test.yml similarity index 100% rename from playbooks/roles/suricata/tests/test.yml rename to roles/suricata/tests/test.yml diff --git a/playbooks/roles/suricata/vars/main.yml b/roles/suricata/vars/main.yml similarity index 100% rename from playbooks/roles/suricata/vars/main.yml rename to roles/suricata/vars/main.yml diff --git a/playbooks/roles/zookeeper/README.md b/roles/zookeeper/README.md similarity index 100% rename from playbooks/roles/zookeeper/README.md rename to roles/zookeeper/README.md diff --git a/playbooks/roles/zookeeper/defaults/main.yml b/roles/zookeeper/defaults/main.yml similarity index 100% rename from playbooks/roles/zookeeper/defaults/main.yml rename to roles/zookeeper/defaults/main.yml diff 
--git a/playbooks/roles/zookeeper/handlers/main.yml b/roles/zookeeper/handlers/main.yml similarity index 100% rename from playbooks/roles/zookeeper/handlers/main.yml rename to roles/zookeeper/handlers/main.yml diff --git a/roles/zookeeper/tasks/main.yml b/roles/zookeeper/tasks/main.yml new file mode 100644 index 000000000..0dec83740 --- /dev/null +++ b/roles/zookeeper/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Install zookeeper packages + yum: + name: + - java-1.8.0-headless + - zookeeper + state: installed + +- name: Enable and Start zookeeper + systemd: + name: zookeeper + state: "{{ 'started' if enable_zookeeper else 'stopped' }}" + enabled: "{{ enable_zookeeper }}" +... diff --git a/playbooks/roles/zookeeper/tests/inventory b/roles/zookeeper/tests/inventory similarity index 100% rename from playbooks/roles/zookeeper/tests/inventory rename to roles/zookeeper/tests/inventory diff --git a/playbooks/roles/zookeeper/tests/test.yml b/roles/zookeeper/tests/test.yml similarity index 100% rename from playbooks/roles/zookeeper/tests/test.yml rename to roles/zookeeper/tests/test.yml diff --git a/playbooks/roles/zookeeper/vars/main.yml b/roles/zookeeper/vars/main.yml similarity index 100% rename from playbooks/roles/zookeeper/vars/main.yml rename to roles/zookeeper/vars/main.yml