Commit d4cfa54

Use systemd for opendistro/kibana/filebeat (#52)
* use systemd for opendistro/kibana/filebeat
* move podman tmpdir onto /run to fix reboot issues
* remove incorrect podman group override in everything template
* fix podman user tmp directory permissions
* remove confirmation when resetting podman database
* fix elasticsearch parameters in kibana unit file
* remove hardcoded podman user/group name
* Move podman temp dir code to podman role
* remove hardcoded podman username in tmpfiles config
1 parent 6516031 commit d4cfa54

15 files changed (+193, -58 lines)

ansible/roles/filebeat/handlers/main.yml

Lines changed: 5 additions & 2 deletions
@@ -1,6 +1,9 @@
 ---
 
 - name: Restart filebeat container
-  command: podman restart filebeat
+  systemd:
+    name: filebeat.service
+    state: restarted
+    enabled: yes
+    daemon_reload: yes
   become: true
-  become_user: "{{ filebeat_podman_user }}"
Lines changed: 5 additions & 15 deletions
@@ -1,17 +1,7 @@
 ---
-- name: Setup file beat
-  containers.podman.podman_container:
-    image: docker.elastic.co/beats/filebeat-oss:7.9.3
-    name: filebeat
-    state: started
-    user: root
-    restart_policy: "always"
-    security_opt:
-      # Required to read /var/log. There might be a better solution, see: https://github.com/containers/podman/issues/3683
-      - label=disable
-    volumes:
-      - /var/log/:/logs:ro
-      - /etc/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
-    command: -e -strict.perms=false -d "*"
+- name: Create systemd unit file
+  template:
+    dest: /etc/systemd/system/filebeat.service
+    src: filebeat.service.j2
   become: true
-  become_user: "{{ filebeat_podman_user }}"
+  notify: Restart filebeat container
Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+# container-filebeat.service
+# based off
+# podman generate systemd filebeat --restart-policy always --new --name
+# with pid/cidfiles replaced with --sdnotify=conmon approach
+
+[Unit]
+Description=Podman container-filebeat.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --replace --name filebeat --user root --restart=always --security-opt label=disable --volume /var/log/:/logs:ro --volume /etc/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro --detach=True docker.elastic.co/beats/filebeat-oss:7.9.3 -e -strict.perms=false -d "*"
+ExecStop=/usr/bin/podman stop --ignore filebeat -t 10
+ExecStopPost=/usr/bin/podman rm --ignore -f filebeat
+KillMode=none
+Type=notify
+NotifyAccess=all
+User={{ filebeat_podman_user }}
+Group={{ filebeat_podman_user }}
+TimeoutStartSec=180
+
+[Install]
+WantedBy=multi-user.target default.target
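`Type=notify` plus `--sdnotify=conmon` means conmon, podman's container monitor process, sends the readiness notification to systemd on the container's behalf, which avoids the pidfile/cidfile bookkeeping that plain `podman generate systemd` emits. A minimal post-deploy check, as a sketch (this task is an assumption, not part of the commit):

- name: Verify filebeat unit reached active state
  ansible.builtin.command: systemctl is-active filebeat.service
  register: filebeat_state
  changed_when: false
  failed_when: filebeat_state.stdout != 'active'
  become: true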
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+---
+
+- name: Restart kibana container
+  systemd:
+    name: kibana.service
+    state: restarted
+    enabled: yes
+    daemon_reload: yes
+  become: true

ansible/roles/kibana/tasks/deploy.yml

Lines changed: 8 additions & 14 deletions
@@ -6,19 +6,13 @@
     name: kibana
   become: true
   become_user: "{{ kibana_podman_user }}"
+  notify: Restart kibana container
 
-- name: Setup kibana
-  containers.podman.podman_container:
-    image: amazon/opendistro-for-elasticsearch-kibana:1.12.0
-    name: kibana
-    state: started
-    restart_policy: "always"
-    ports:
-      - "5601:5601"
-    env:
-      ELASTICSEARCH_URL: https://{{ elasticsearch_address }}:9200
-      ELASTICSEARCH_HOSTS: https://{{ elasticsearch_address }}:9200
-      ELASTICSEARCH_USERNAME: admin
-      ELASTICSEARCH_PASSWORD: "{{ secrets_openhpc_elasticsearch_admin_password }}"
-  become_user: "{{ kibana_podman_user }}"
+- name: Create systemd unit file
+  template:
+    dest: /etc/systemd/system/kibana.service
+    src: kibana.service.j2
   become: true
+  notify: Restart kibana container
+
+- meta: flush_handlers
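The `- meta: flush_handlers` runs the notified `Restart kibana container` handler immediately rather than at the end of the play, so anything later in the play can assume the service is up. A hypothetical follow-up task relying on that ordering (the URL, retries, and delay are assumptions):

- name: Wait for kibana to respond on its published port
  ansible.builtin.uri:
    url: http://localhost:5601/api/status
    status_code: 200
  register: kibana_status
  until: kibana_status is succeeded
  retries: 30
  delay: 10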
Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+# container-kibana.service
+
+[Unit]
+Description=Podman container-kibana.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon -d --replace --name kibana --restart=no --env ELASTICSEARCH_URL=https://{{ elasticsearch_address }}:9200 --env ELASTICSEARCH_HOSTS=https://{{ elasticsearch_address }}:9200 --env ELASTICSEARCH_USERNAME=admin --env ELASTICSEARCH_PASSWORD="{{ secrets_openhpc_elasticsearch_admin_password }}" --publish 5601:5601 --detach=True amazon/opendistro-for-elasticsearch-kibana:1.12.0
+ExecStop=/usr/bin/podman stop --ignore kibana -t 10
+ExecStopPost=/usr/bin/podman rm --ignore -f kibana
+KillMode=none
+Type=notify
+NotifyAccess=all
+User={{ kibana_podman_user }}
+Group={{ kibana_podman_user }}
+TimeoutStartSec=180
+
+[Install]
+WantedBy=multi-user.target default.target

ansible/roles/opendistro/handlers/main.yml

Lines changed: 5 additions & 2 deletions
@@ -1,6 +1,9 @@
 ---
 
 - name: Restart opendistro container
-  command: podman restart opendistro
+  systemd:
+    name: opendistro.service
+    state: restarted
+    enabled: yes
+    daemon_reload: yes
   become: true
-  become_user: "{{ opendistro_podman_user }}"

ansible/roles/opendistro/tasks/deploy.yml

Lines changed: 6 additions & 22 deletions
@@ -5,27 +5,11 @@
     name: opendistro
   become: true
   become_user: "{{ opendistro_podman_user }}"
+  notify: Restart opendistro container
 
-- name: Setup opendistro
-  containers.podman.podman_container:
-    name: opendistro
-    image: amazon/opendistro-for-elasticsearch:1.12.0
-    state: started
-    restart_policy: "always"
-    ports:
-      - "9200:9200"
-    user: elasticsearch
-    ulimit:
-      - memlock=-1:-1
-      # maximum number of open files for the Elasticsearch user, set to at least 65536 on modern systems
-      - nofile=65536:65536
-    volume:
-      - opendistro:/usr/share/elasticsearch/data
-      - /etc/elastic/internal_users.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/internal_users.yml:ro
-    env:
-      node.name: opendistro
-      discovery.type: single-node
-      bootstrap.memory_lock: "true" # along with the memlock settings below, disables swapping
-      ES_JAVA_OPTS: -Xms512m -Xmx512m # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
+- name: Create systemd unit file
+  template:
+    dest: /etc/systemd/system/opendistro.service
+    src: opendistro.service.j2
   become: true
-  become_user: "{{ opendistro_podman_user }}"
+  notify: Restart opendistro container
Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+# container-opendistro.service
+
+[Unit]
+Description=Podman container-opendistro.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network.target
+After=network-online.target
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=always
+ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon -d --replace --name opendistro --restart=no --user elasticsearch --ulimit memlock=-1:-1 --ulimit nofile=65536:65536 --volume opendistro:/usr/share/elasticsearch/data --volume /etc/elastic/internal_users.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/internal_users.yml:ro --env node.name=opendistro --env discovery.type=single-node --env bootstrap.memory_lock=true --env "ES_JAVA_OPTS=-Xms512m -Xmx512m" --publish 9200:9200 amazon/opendistro-for-elasticsearch:1.12.0
+ExecStop=/usr/bin/podman stop --ignore opendistro -t 10
+# note for some reason this returns status=143 which makes systemd show the unit as failed, not stopped
+ExecStopPost=/usr/bin/podman rm --ignore -f opendistro
+SuccessExitStatus=143 SIGTERM
+KillMode=none
+Type=notify
+NotifyAccess=all
+LimitNOFILE=65536
+LimitMEMLOCK=infinity
+User={{ opendistro_podman_user }}
+Group={{ opendistro_podman_user }}
+TimeoutStartSec=180
+
+[Install]
+WantedBy=multi-user.target default.target
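The `SuccessExitStatus=143 SIGTERM` line accounts for the status=143 noted in the unit's comment: a JVM such as Elasticsearch's exits with 128 + 15 = 143 when it handles SIGTERM itself rather than dying from the signal, so without this directive systemd would record a clean stop as a failure. One way to confirm what systemd recorded, as a sketch (the task is an assumption; `ExecMainStatus` is a standard systemd property):

- name: Show the last recorded exit status of the opendistro unit
  ansible.builtin.command: systemctl show opendistro.service --property=ExecMainStatus
  register: opendistro_exit
  changed_when: false
  become: true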
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+podman_users:
+  - name: "{{ ansible_user }}"
+podman_tmp_dir_root: /run # MUST be on a tmpfs
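Keeping `podman_tmp_dir_root` on a tmpfs is what fixes the reboot issues named in the commit message: libpod's runtime state is recreated on every boot instead of going stale under a persistent path. If an environment had to relocate it, an override would look like this sketch (the path is an assumption and must itself be a tmpfs, as the validation task further down enforces):

podman_tmp_dir_root: /dev/shm  # hypothetical override; also tmpfs, so wiped on reboot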

ansible/roles/podman/tasks/config.yml

Lines changed: 57 additions & 0 deletions
@@ -15,3 +15,60 @@
 
 - name: reset ssh connection to allow user changes to affect 'current login user'
   meta: reset_connection
+
+- name: Ensure podman users exist
+  user: "{{ item }}"
+  with_items: "{{ podman_users }}"
+  register: podman_user_info
+
+- name: Define tmp directories on tmpfs
+  blockinfile:
+    path: /etc/tmpfiles.d/podman.conf
+    create: yes
+    block: |
+      d {{ podman_tmp_dir_root }}/{{ item.name }}/libpod/tmp 0755 {{ item.name }} {{ item.name }}
+      Z {{ podman_tmp_dir_root }}/{{ item.name }} 0755 {{ item.name }} {{ item.name }}
+  become: yes
+  loop: "{{ podman_users }}"
+  register: podman_tmp_dirs
+
+- name: Create tmp directories
+  command: systemd-tmpfiles --create
+  become: true
+  when: podman_tmp_dirs.results | selectattr('changed') | list | length > 0 # when: any changed
+
+- name: Create podman configuration directories
+  file:
+    path: "{{ item.home }}/.config/containers/"
+    state: directory
+    owner: "{{ item.name }}"
+    group: "{{ item.name }}"
+  become: yes
+  loop: "{{ podman_user_info.results }}"
+
+- name: Set podman to use temp directories
+  community.general.ini_file:
+    path: "{{ item.home }}/.config/containers/containers.conf"
+    section: engine
+    option: tmp_dir
+    value: '"{{ podman_tmp_dir_root }}/{{ item.name }}/libpod/tmp"'
+    owner: "{{ item.name }}"
+    group: "{{ item.name }}"
+    create: yes
+  loop: "{{ podman_user_info.results }}"
+  become: yes
+  register: podman_tmp
+
+- debug:
+    var: podman_tmp
+
+- name: Reset podman database
+  # otherwise old config overrides!
+  command:
+    cmd: podman system reset --force
+  become: yes
+  become_user: "{{ item.item.name }}"
+  when: item.changed
+  loop: "{{ podman_tmp.results }}"
+  loop_control:
+    label: "{{ item.item.name }}"
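In the `blockinfile` content above, the tmpfiles.d `d` entry creates each user's libpod tmp directory at boot with the stated mode and owner, while the `Z` entry recursively re-applies ownership and permissions beneath the user's directory. For a hypothetical user `rocky` with the default `/run` root, the rendered `/etc/tmpfiles.d/podman.conf` block would read roughly:

d /run/rocky/libpod/tmp 0755 rocky rocky
Z /run/rocky 0755 rocky rocky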
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+- name: Get tmp directory filesystem type
+  command: stat -f -c %T {{ podman_tmp_dir_root }}
+  register: podman_tmp_fstype
+  changed_when: false
+
+- name: Check tmp directory is on tmpfs
+  assert:
+    that: podman_tmp_fstype.stdout == 'tmpfs'
+    fail_msg: "{{ podman_tmp_fstype }} (variable podman_tmp_fstype) must be on tmpfs"
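`stat -f -c %T` prints the type of the filesystem backing the path (`tmpfs` for `/run` on a stock EL host), which is what the assert compares against. A manual spot-check of the same thing, as a sketch:

# Expected output on a standard EL host: tmpfs
- name: Print the filesystem type behind the podman tmp root
  ansible.builtin.command: stat -f -c %T /run
  register: fstype
  changed_when: false

- ansible.builtin.debug:
    var: fstype.stdout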

ansible/validate.yml

Lines changed: 9 additions & 0 deletions
@@ -2,6 +2,15 @@
 
 # Fail early if configuration is invalid
 
+- name: Validate podman configuration
+  hosts: podman
+  tags: podman
+  tasks:
+    - import_role:
+        name: podman
+        tasks_from: validate.yml
+      tags: validate
+
 - name: Validate filebeat configuration
   hosts: filebeat
   tags: filebeat
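Since the play is tagged `podman` and the imported tasks are tagged `validate`, the new check can be exercised on its own with something like `ansible-playbook ansible/validate.yml --tags podman` (a sketch of the invocation; inventory arguments omitted).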
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+podman_users: "{{ appliances_local_users_podman }}"

environments/common/layouts/everything

Lines changed: 0 additions & 3 deletions
@@ -10,9 +10,6 @@ cluster
 [mysql:children]
 control
 
-[podman:children]
-cluster
-
 [prometheus:children]
 control
 