
Use systemd for opendistro/kibana/filebeat #52

Merged 9 commits on Mar 31, 2021
ansible/roles/filebeat/handlers/main.yml: 5 additions & 2 deletions
@@ -1,6 +1,9 @@
---

- name: Restart filebeat container
-  command: podman restart filebeat
+  systemd:
+    name: filebeat.service
+    state: restarted
+    enabled: yes
+    daemon_reload: yes
  become: true
  become_user: "{{ filebeat_podman_user }}"
ansible/roles/filebeat/tasks/deploy.yml: 5 additions & 15 deletions
@@ -1,17 +1,7 @@
---
-- name: Setup file beat
-  containers.podman.podman_container:
-    image: docker.elastic.co/beats/filebeat-oss:7.9.3
-    name: filebeat
-    state: started
-    user: root
-    restart_policy: "always"
-    security_opt:
-      # Required to read /var/log. There might be a better solution, see https://github.com/containers/podman/issues/3683
-      - label=disable
-    volumes:
-      - /var/log/:/logs:ro
-      - /etc/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
-    command: -e -strict.perms=false -d "*"
+- name: Create systemd unit file
+  template:
+    dest: /etc/systemd/system/filebeat.service
+    src: filebeat.service.j2
  become: true
  become_user: "{{ filebeat_podman_user }}"
+  notify: Restart filebeat container
ansible/roles/filebeat/templates/filebeat.service.j2: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
# container-filebeat.service
# based off
# podman generate systemd filebeat --restart-policy always --new --name
# with pid/cidfiles replaced with --sdnotify=conmon approach

[Unit]
Description=Podman container-filebeat.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --replace --name filebeat --user root --restart=always --security-opt label=disable --volume /var/log/:/logs:ro --volume /etc/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro --detach=True docker.elastic.co/beats/filebeat-oss:7.9.3 -e -strict.perms=false -d "*"
ExecStop=/usr/bin/podman stop --ignore filebeat -t 10
ExecStopPost=/usr/bin/podman rm --ignore -f filebeat
KillMode=none
Type=notify
NotifyAccess=all
User={{ filebeat_podman_user }}
Group={{ filebeat_podman_user }}
TimeoutStartSec=180

[Install]
WantedBy=multi-user.target default.target
ansible/roles/kibana/handlers/main.yml: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
---

- name: Restart kibana container
  systemd:
    name: kibana.service
    state: restarted
    enabled: yes
    daemon_reload: yes
  become: true
ansible/roles/kibana/tasks/deploy.yml: 8 additions & 14 deletions
@@ -6,19 +6,13 @@
    name: kibana
  become: true
  become_user: "{{ kibana_podman_user }}"
+  notify: Restart kibana container

-- name: Setup kibana
-  containers.podman.podman_container:
-    image: amazon/opendistro-for-elasticsearch-kibana:1.12.0
-    name: kibana
-    state: started
-    restart_policy: "always"
-    ports:
-      - "5601:5601"
-    env:
-      ELASTICSEARCH_URL: https://{{ elasticsearch_address }}:9200
-      ELASTICSEARCH_HOSTS: https://{{ elasticsearch_address }}:9200
-      ELASTICSEARCH_USERNAME: admin
-      ELASTICSEARCH_PASSWORD: "{{ secrets_openhpc_elasticsearch_admin_password }}"
-  become_user: "{{ kibana_podman_user }}"
+- name: Create systemd unit file
+  template:
+    dest: /etc/systemd/system/kibana.service
+    src: kibana.service.j2
+  become: true
+  notify: Restart kibana container

+- meta: flush_handlers
ansible/roles/kibana/templates/kibana.service.j2: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
# container-kibana.service

[Unit]
Description=Podman container-kibana.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon -d --replace --name kibana --restart=no --env ELASTICSEARCH_URL=https://{{ elasticsearch_address }}:9200 --env ELASTICSEARCH_HOSTS=https://{{ elasticsearch_address}}:9200 --env ELASTICSEARCH_USERNAME=admin --env ELASTICSEARCH_PASSWORD="{{ secrets_openhpc_elasticsearch_admin_password }}" --publish 5601:5601 --detach=True amazon/opendistro-for-elasticsearch-kibana:1.12.0
ExecStop=/usr/bin/podman stop --ignore kibana -t 10
ExecStopPost=/usr/bin/podman rm --ignore -f kibana
KillMode=none
Type=notify
NotifyAccess=all
User={{ kibana_podman_user }}
Group={{ kibana_podman_user }}
TimeoutStartSec=180

[Install]
WantedBy=multi-user.target default.target
ansible/roles/opendistro/handlers/main.yml: 5 additions & 2 deletions
@@ -1,6 +1,9 @@
---

- name: Restart opendistro container
-  command: podman restart opendistro
+  systemd:
+    name: opendistro.service
+    state: restarted
+    enabled: yes
+    daemon_reload: yes
  become: true
  become_user: "{{ opendistro_podman_user }}"
ansible/roles/opendistro/tasks/deploy.yml: 6 additions & 22 deletions
@@ -5,27 +5,11 @@
    name: opendistro
  become: true
  become_user: "{{ opendistro_podman_user }}"
+  notify: Restart opendistro container

-- name: Setup opendistro
-  containers.podman.podman_container:
-    name: opendistro
-    image: amazon/opendistro-for-elasticsearch:1.12.0
-    state: started
-    restart_policy: "always"
-    ports:
-      - "9200:9200"
-    user: elasticsearch
-    ulimit:
-      - memlock=-1:-1
-      # maximum number of open files for the Elasticsearch user, set to at least 65536 on modern systems
-      - nofile=65536:65536
-    volume:
-      - opendistro:/usr/share/elasticsearch/data
-      - /etc/elastic/internal_users.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/internal_users.yml:ro
-    env:
-      node.name: opendistro
-      discovery.type: single-node
-      bootstrap.memory_lock: "true" # along with the memlock settings below, disables swapping
-      ES_JAVA_OPTS: -Xms512m -Xmx512m # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
+- name: Create systemd unit file
+  template:
+    dest: /etc/systemd/system/opendistro.service
+    src: opendistro.service.j2
  become: true
  become_user: "{{ opendistro_podman_user }}"
+  notify: Restart opendistro container
ansible/roles/opendistro/templates/opendistro.service.j2: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
# container-opendistro.service

[Unit]
Description=Podman container-opendistro.service
Documentation=man:podman-generate-systemd(1)
Wants=network.target
After=network-online.target

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon -d --replace --name opendistro --restart=no --user elasticsearch --ulimit memlock=-1:-1 --ulimit nofile=65536:65536 --volume opendistro:/usr/share/elasticsearch/data --volume /etc/elastic/internal_users.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/internal_users.yml:ro --env node.name=opendistro --env discovery.type=single-node --env bootstrap.memory_lock=true --env "ES_JAVA_OPTS=-Xms512m -Xmx512m" --publish 9200:9200 amazon/opendistro-for-elasticsearch:1.12.0
ExecStop=/usr/bin/podman stop --ignore opendistro -t 10
# note for some reason this returns status=143 which makes systemd show the unit as failed, not stopped
ExecStopPost=/usr/bin/podman rm --ignore -f opendistro
SuccessExitStatus=143 SIGTERM
KillMode=none
Type=notify
NotifyAccess=all
LimitNOFILE=65536
LimitMEMLOCK=infinity
User={{ opendistro_podman_user }}
Group={{ opendistro_podman_user }}
TimeoutStartSec=180

[Install]
WantedBy=multi-user.target default.target
ansible/roles/podman/defaults/main.yml: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
podman_users:
  - name: "{{ ansible_user }}"
podman_tmp_dir_root: /run # MUST be on a tmpfs
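These defaults only add the Ansible connection user to podman_users. As a rough sketch (the extra user name below is hypothetical and site-specific), an environment could override the list with further entries; each entry is passed straight to the user module and later referenced by its name and home directory:

# hypothetical override, e.g. in an environment's group_vars
podman_users:
  - name: "{{ ansible_user }}"
  - name: svc_podman   # hypothetical additional rootless-podman account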
ansible/roles/podman/tasks/config.yml: 57 additions & 0 deletions
@@ -15,3 +15,60 @@

- name: reset ssh connection to allow user changes to affect 'current login user'
  meta: reset_connection

- name: Ensure podman users exist
  user: "{{ item }}"
  with_items: "{{ podman_users }}"
  register: podman_user_info

Review comment (Collaborator): Still reckon we should only do this in one place and assume that the users exist in this role, but as this will essentially be a no-op at the cost of running a few extra tasks, probably not one to bike-shed over as the overall patch looks good to me.

- name: Define tmp directories on tmpfs
  blockinfile:
    path: /etc/tmpfiles.d/podman.conf
    create: yes
    block: |
      d {{ podman_tmp_dir_root }}/{{ item.name }}/libpod/tmp 0755 {{ item.name }} {{ item.name }}
      Z {{ podman_tmp_dir_root }}/{{ item.name }} 0755 {{ item.name }} {{ item.name }}
  become: yes
  loop: "{{ podman_users }}"
  register: podman_tmp_dirs
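To make the d and Z directives concrete: with the default podman_tmp_dir_root of /run and a hypothetical user named rocky, the block rendered into /etc/tmpfiles.d/podman.conf would look roughly as follows (blockinfile's BEGIN/END marker comments omitted). d creates the directory at boot with the given mode and ownership; Z recursively resets mode and ownership on the existing tree:

# /etc/tmpfiles.d/podman.conf (rendered sketch for hypothetical user "rocky")
d /run/rocky/libpod/tmp 0755 rocky rocky
Z /run/rocky 0755 rocky rocky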

- name: Create tmp directories
  command: systemd-tmpfiles --create
  become: true
  when: podman_tmp_dirs.results | selectattr('changed') | list | length > 0 # when: any changed

- name: Create podman configuration directories
  file:
    path: "{{ item.home }}/.config/containers/"
    state: directory
    owner: "{{ item.name }}"
    group: "{{ item.name }}"
  become: yes
  loop: "{{ podman_user_info.results }}"

- name: Set podman to use temp directories
  community.general.ini_file:
    path: "{{ item.home }}/.config/containers/containers.conf"
    section: engine
    option: tmp_dir
    value: '"{{ podman_tmp_dir_root }}/{{ item.name }}/libpod/tmp"'
    owner: "{{ item.name }}"
    group: "{{ item.name }}"
    create: yes
  loop: "{{ podman_user_info.results }}"
  become: yes
  register: podman_tmp
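Assuming the same hypothetical user, the resulting ~/.config/containers/containers.conf would then contain something like the snippet below; the extra quoting in the task's value is deliberate, as containers.conf is TOML-style and expects a quoted string:

# ~/.config/containers/containers.conf (rendered sketch)
[engine]
tmp_dir = "/run/rocky/libpod/tmp"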

- debug:
    var: podman_tmp

- name: Reset podman database
  # otherwise old config overrides!
  command:
    cmd: podman system reset --force
  become: yes
  become_user: "{{ item.item.name }}"
  when: item.changed
  loop: "{{ podman_tmp.results }}"
  loop_control:
    label: "{{ item.item.name }}"
ansible/roles/podman/tasks/validate.yml: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
- name: Get tmp directory filesystem type
  command: stat -f -c %T {{ podman_tmp_dir_root }}
  register: podman_tmp_fstype
  changed_when: false

- name: Check tmp directory is on tmpfs
  assert:
    that: podman_tmp_fstype.stdout == 'tmpfs'
    fail_msg: "{{ podman_tmp_fstype }} (variable podman_tmp_fstype) must be on tmpfs"
ansible/validate.yml: 9 additions & 0 deletions
@@ -2,6 +2,15 @@

# Fail early if configuration is invalid

- name: Validate podman configuration
  hosts: podman
  tags: podman
  tasks:
    - import_role:
        name: podman
        tasks_from: validate.yml
      tags: validate

Review comment (Collaborator): Put in podman role?
Reply (Author): See comment above as to why all of this isn't in the role.

- name: Validate filebeat configuration
  hosts: filebeat
  tags: filebeat
environments/common/inventory/group_vars/all/podman.yml: 1 addition & 0 deletions
@@ -0,0 +1 @@
podman_users: "{{ appliances_local_users_podman }}"
environments/common/layouts/everything: 0 additions & 3 deletions
@@ -10,9 +10,6 @@ cluster
[mysql:children]
control

-[podman:children]
-cluster

[prometheus:children]
control