From f64d0b449c7f81306339c77fd38c6680a6926f58 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Sachse?= <joerg.sachse@slub-dresden.de>
Date: Fri, 2 Sep 2022 09:09:48 +0200
Subject: [PATCH] feat: include DisApp, simplify SystemD units, add service
 protections, update Bash completion

---
 files/home/processing/.bash_completion        |  49 ---------
 files/usr/local/bin/move_old_logs.sh          |  27 +++--
 molecule/resources/playbooks/prepare.yml      |   6 +-
 tasks/configure_nfs_mounts.yml                |  79 ++++++++++---
 tasks/configure_processing_user.yml           |  34 +++++-
 tasks/install_subapp.yml                      | 104 +++++++++++++-----
 tasks/install_ta_tools.yml                    |   3 +-
 templates/disapp.cfg.j2                       |  79 +++++++++++++
 templates/etc/systemd/user/disapp.service.j2  |  59 ++++++++++
 templates/etc/systemd/user/subapp.service.j2  |  46 +++-----
 .../webservice_status_SLUBarchiv.service.j2   |  20 ++--
 templates/subapp.cfg.j2                       |   8 +-
 .../usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2 |   1 +
 13 files changed, 358 insertions(+), 157 deletions(-)
 delete mode 100644 files/home/processing/.bash_completion
 create mode 100644 templates/disapp.cfg.j2
 create mode 100644 templates/etc/systemd/user/disapp.service.j2
 create mode 100644 templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2

diff --git a/files/home/processing/.bash_completion b/files/home/processing/.bash_completion
deleted file mode 100644
index 016c0c9..0000000
--- a/files/home/processing/.bash_completion
+++ /dev/null
@@ -1,49 +0,0 @@
-# installation help:
-#   1. copy into home directory of Submission Application user, e.g. /home/processing
-#   2. edit IMPORT_DIR (path to Submission Application Import directory)
-#   3. restart bash
-#   * requires bash-completion helper function _filedir, e.g. built in Debian
-#   * requires alias 'subapp' for execution of Submission Application,
-#     e.g. adding 'alias subapp='/usr/bin/perl -I /usr/local/perl /usr/local/bin/subapp_bagit.pl' to ~/.bashrc
-_pushd () {
-    command pushd "$@" > /dev/null
-}
-_popd () {
-    command popd "$@" > /dev/null
-}
-_subapp()
-{
-    IMPORT_DIR="/mnt/import"
-    local cur prev first opts
-    COMPREPLY=()
-    cur="${COMP_WORDS[COMP_CWORD]}"
-    prev="${COMP_WORDS[COMP_CWORD-1]}"
-    first="${COMP_WORDS[1]}"
-    opts="\
---help \
---man \
---config-file \
---single_run \
---reset_failed_preingest \
---force_restore_lza_id \
---permanent_report \
---start \
---status \
---stop \
---dismantle-orders"
-
-    case "$first" in
-        "--reset_failed_preingest")
-            _pushd "$IMPORT_DIR"
-            _filedir -d
-            _popd
-            return
-            ;;
-    esac
-
-    if [[ ${cur} == -* ]] ; then
-        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
-        return 0
-    fi
-}
-complete -F _subapp subapp
diff --git a/files/usr/local/bin/move_old_logs.sh b/files/usr/local/bin/move_old_logs.sh
index 09d0d43..cdf2e8d 100644
--- a/files/usr/local/bin/move_old_logs.sh
+++ b/files/usr/local/bin/move_old_logs.sh
@@ -8,18 +8,21 @@ START_YEAR="2015"
 CURRENT_YEAR="$( date +%Y )"
 PREVIOUS_YEAR="$(( CURRENT_YEAR - 1 ))"
 
-cd "/var/log/subapp/${HOSTNAME}/" || exit 1
+for APP in subapp disapp; do
+	cd "/var/log/$APP/${HOSTNAME}/" || { echo "skipping missing /var/log/$APP/${HOSTNAME}" >&2; continue; }
 
-# create directories for old logfiles
-for YEAR in $( seq ${START_YEAR} ${PREVIOUS_YEAR} ); do
-	mkdir -p "old/${YEAR}"
-done
+	# create directories for old logfiles
+	for YEAR in $( seq ${START_YEAR} ${PREVIOUS_YEAR} ); do
+		mkdir -p "old/${YEAR}"
+	done
 
-# move all old logfiles
-for YEAR in $( seq ${START_YEAR} ${PREVIOUS_YEAR} ); do
-	if [[ -n $( find ./ -maxdepth 1 -name "Protokoll_SLUBArchiv_Erfolgreich-${YEAR}*.log" ) ]]; then mv Protokoll_SLUBArchiv_Erfolgreich-${YEAR}*.log "old/${YEAR}/"; fi
-	if [[ -n $( find ./ -maxdepth 1 -name "Protokoll_SLUBArchiv_FEHLER-${YEAR}*.log" ) ]]; then mv Protokoll_SLUBArchiv_FEHLER-${YEAR}*.log "old/${YEAR}/"; fi
-	if [[ -n $( find ./ -maxdepth 1 -name "sips.log.${YEAR}-*.lz" ) ]]; then mv sips.log.${YEAR}-*.lz "old/${YEAR}/"; fi
-	if [[ -n $( find ./ -maxdepth 1 -name "subapp.log.${YEAR}-*.lz" ) ]]; then mv subapp.log.${YEAR}-*.lz "old/${YEAR}/"; fi
-	if [[ -n $( find ./ -maxdepth 1 -name "webservice.log.${YEAR}-*.lz" ) ]]; then mv webservice.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+	# move all old logfiles
+	for YEAR in $( seq ${START_YEAR} ${PREVIOUS_YEAR} ); do
+		if [[ -n $( find ./ -maxdepth 1 -name "Protokoll_SLUBArchiv_Erfolgreich-${YEAR}*.log" ) ]]; then mv Protokoll_SLUBArchiv_Erfolgreich-${YEAR}*.log "old/${YEAR}/"; fi
+		if [[ -n $( find ./ -maxdepth 1 -name "Protokoll_SLUBArchiv_FEHLER-${YEAR}*.log" ) ]]; then mv Protokoll_SLUBArchiv_FEHLER-${YEAR}*.log "old/${YEAR}/"; fi
+		if [[ -n $( find ./ -maxdepth 1 -name "sips.log.${YEAR}-*.lz" ) ]]; then mv sips.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+		if [[ -n $( find ./ -maxdepth 1 -name "disapp.log.${YEAR}-*.lz" ) ]]; then mv disapp.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+		if [[ -n $( find ./ -maxdepth 1 -name "subapp.log.${YEAR}-*.lz" ) ]]; then mv subapp.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+		if [[ -n $( find ./ -maxdepth 1 -name "webservice.log.${YEAR}-*.lz" ) ]]; then mv webservice.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+	done
 done
diff --git a/molecule/resources/playbooks/prepare.yml b/molecule/resources/playbooks/prepare.yml
index 0168634..77c4fdf 100644
--- a/molecule/resources/playbooks/prepare.yml
+++ b/molecule/resources/playbooks/prepare.yml
@@ -16,12 +16,14 @@
       become: true
     - name: add GPG key for SLUB Debian repository
       ansible.builtin.apt_key:
-        url: "https://sdvdebianrepo.slub-dresden.de/deb-repository/pub.gpg.key"
+        # url: "https://sdvdebianrepo.slub-dresden.de/deb-repository/pub.gpg.key"
+        url: "http://bdv141.slub-dresden.de/deb-repository/pub.gpg.key"
         state: present
       become: true
     - name: add repo URL to sources.list
       ansible.builtin.apt_repository:
-        repo: "deb https://sdvdebianrepo.slub-dresden.de/deb-repository bullseye main"
+        # repo: "deb https://sdvdebianrepo.slub-dresden.de/deb-repository bullseye main"
+        repo: "deb http://bdv141.slub-dresden.de/deb-repository lza-testing main"
         state: present
         update_cache: true
         mode: "0644"
diff --git a/tasks/configure_nfs_mounts.yml b/tasks/configure_nfs_mounts.yml
index ca8eaa0..a3df2f2 100644
--- a/tasks/configure_nfs_mounts.yml
+++ b/tasks/configure_nfs_mounts.yml
@@ -13,6 +13,10 @@
     - "{{ nfs_mounts_subapp.hosts[ansible_hostname]['import']['path'] | default('/mnt/import') }}"
     - "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['path'] | default('/home/import/upload') }}"
     - "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['path'] | default('/home/import/download') }}"
+    - "/var/log/rosetta/{{ ansible_hostname }}/"
+    # - "/var/log/rosetta/{{ ansible_hostname }}/disapp/"
+    # - "/var/log/rosetta/{{ ansible_hostname }}/subapp/"
+    # - "/var/log/rosetta/{{ ansible_hostname }}/legacy/"
   register: stat_result
 - name: if dir doesn't exist, create it with correct permissions
   ansible.builtin.file:
@@ -24,20 +28,41 @@
   loop: "{{ stat_result.results }}"
   when: not item.stat.exists
 
-- name: Mounts für SubApp-Shares & Logs
+# - name: Mounts für SubApp-Shares & Logs LEGACY
+#   ansible.posix.mount:
+#     path: "{{ item.path }}"
+#     src: "{{ item.src | default(omit) }}"
+#     state: "{{ item.state | default('mounted') }}"
+#     fstype: nfs
+#     opts: "{{ item.opts | default( nfs_opts.v3 ) }}"
+#   loop:
+#     # LEGACY -> remove
+#       # we remove this mount in favor of separate mounts for logging ingests and access (but we can only do this once the rollout has happened and the sub-/dissapp are logging to their new locations, because otherwise the "Device is busy", so think of this as preparation)
+#     #- path: "/var/log/subapp/{{ ansible_hostname }}"
+#     #  state: unmounted
+#       # ... and replace it with a temporary mount that we can use to access old logs until they've been migrated to their new log directories.
+#     - path: "/var/log/rosetta/{{ ansible_hostname }}/"
+#       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_disapp']['nfs_share'] }}{{ ansible_hostname }}"
+#       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_disapp']['nfs_opts'] }}"
+#   tags: [notest]
+
+- name: Mounts für SubApp-Shares & Logs NEW
   ansible.posix.mount:
     path: "{{ item.path }}"
-    src: "{{ item.src }}"
+    src: "{{ item.src | default(omit) }}"
     state: "{{ item.state | default('mounted') }}"
     fstype: nfs
     opts: "{{ item.opts | default( nfs_opts.v3 ) }}"
-  with_items:
-    - path: "/var/log/subapp/{{ ansible_hostname }}"
-      src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log']['nfs_share'] }}{{ ansible_hostname }}"
-      opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log']['nfs_opts'] }}"
+  loop:
+    # common Log  - use this once the migration to the separated dis-/subapp is finished
+    - path: "/var/log/rosetta/{{ ansible_hostname }}/"
+      src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_disapp']['nfs_share'] }}{{ ansible_hostname }}/"
+      opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_disapp']['nfs_opts'] }}"
+    # DisApp
     - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['access']['path'] | default('/mnt/' + ansible_hostname + '_access') }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['access']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['access']['nfs_opts'] }}"
+    # SubApp
     - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['ingest']['path'] | default('/mnt/' + ansible_hostname + '_ingest') }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['ingest']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['ingest']['nfs_opts'] }}"
@@ -48,20 +73,34 @@
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['nfs_opts'] }}"
   tags: [notest]
-- name: create subdirectory in Share before mounting it...
+- name: create subdirectories in Shares before mounting them...
   ansible.builtin.file:
-    path: "{{ paths.access.mountpoint }}/consumer_dir"
+    path: "{{ item.path }}"
     state: directory
-    mode: "0755"
-  tags: [notest]
-- name: ... and now mount it
+    mode: "{{ item.mode | default('0755') }}"
+  loop:
+    - path: "{{ paths.access.mountpoint }}/consumer_dir"
+      mode: "0770"
+    - path: "/var/log/rosetta/{{ ansible_hostname }}/disapp/"
+    - path: "/var/log/rosetta/{{ ansible_hostname }}/subapp/"
+  # tags: [notest]
+- name: ... and now mount them
   ansible.posix.mount:
     path: "{{ item.path }}"
     src: "{{ item.src }}"
     state: "{{ item.state | default('mounted') }}"
     fstype: nfs
     opts: "{{ item.opts | default( nfs_opts.v3 ) }}"
-  with_items:
+  loop:
+    # DisApp
+    - path: "/var/log/rosetta/{{ ansible_hostname }}/disapp/"
+      src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_disapp']['nfs_share'] }}{{ ansible_hostname }}/disapp"
+      opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_disapp']['nfs_opts'] }}"
+    # SubApp
+    - path: "/var/log/rosetta/{{ ansible_hostname }}/subapp"
+      src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_subapp']['nfs_share'] }}{{ ansible_hostname }}/subapp"
+      opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log_subapp']['nfs_opts'] }}"
+    # SFTP
     - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['path'] | default('/home/import/download') }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['nfs_opts'] }}"
@@ -75,11 +114,15 @@
     group: "{{ item.group | default(omit) }}"
     mode: "{{ item.mode | default('0770') }}"
     state: "{{ item.state | default('directory') }}"
-  with_items:
-    - path: "{{ paths.log.mountpoint }}"
-      owner: "{{ paths.log.owner }}"
-      group: "{{ paths.log.group }}"
-      mode: "{{ paths.log.mode }}"
+  loop:
+    - path: "{{ paths.log_disapp.mountpoint }}"
+      owner: "{{ paths.log_disapp.owner }}"
+      group: "{{ paths.log_disapp.group }}"
+      mode: "{{ paths.log_disapp.mode }}"
+    - path: "{{ paths.log_subapp.mountpoint }}"
+      owner: "{{ paths.log_subapp.owner }}"
+      group: "{{ paths.log_subapp.group }}"
+      mode: "{{ paths.log_subapp.mode }}"
     - path: "{{ paths.access.mountpoint }}"
       owner: "{{ paths.access.owner }}"
       group: "{{ paths.access.group }}"
@@ -93,6 +136,8 @@
       group: "{{ paths.access.group }}"
       mode: "{{ paths.access.mode }}"
     - path: "{{ paths.sftp_download.mountpoint }}"
+      owner: "{{ paths.sftp_download.owner }}"
+      group: "{{ paths.sftp_download.group }}"
       mode: "{{ paths.sftp_download.mode }}"
     - path: "{{ paths.ingest.mountpoint }}"
       owner: "{{ paths.ingest.owner }}"
diff --git a/tasks/configure_processing_user.yml b/tasks/configure_processing_user.yml
index ec534c9..62b5af2 100644
--- a/tasks/configure_processing_user.yml
+++ b/tasks/configure_processing_user.yml
@@ -8,9 +8,25 @@
   ansible.builtin.copy:
     remote_src: true
     src: "/etc/skel/.vimrc"
-    dest: "/home/{{ vault_subapp_user }}/.vimrc"
+    dest: "/home/{{ vault_disapp_user }}/.vimrc"
     mode: "0644"
   when: vimrc_skel.stat.exists
+  loop:
+    - "{{ vault_disapp_user }}"
+    - "{{ vault_subapp_user }}"
+
+- name: configure .bashrc for DisApp user
+  ansible.builtin.blockinfile:
+    path: "/home/{{ vault_disapp_user }}/.bashrc"
+    backup: "no"
+    create: "yes"
+    owner: "{{ vault_disapp_user }}"
+    group: "{{ vault_disapp_group }}"
+    mode: "0644"
+    marker: "# {mark} ANSIBLE MANAGED BLOCK - DisApp-specific"
+    state: present
+    block: |
+      cd ~
 
 - name: configure .bashrc for SubApp user
   ansible.builtin.blockinfile:
@@ -50,6 +66,22 @@
 
       cd ~
 
+- name: Add aliases for DisApp user
+  ansible.builtin.blockinfile:
+    path: "/home/{{ vault_disapp_user }}/.bash_aliases"
+    backup: "no"
+    create: "yes"
+    owner: "{{ vault_disapp_user }}"
+    group: "{{ vault_disapp_group }}"
+    mode: "0644"
+    state: present
+    marker: "# {mark} ANSIBLE MANAGED BLOCK - DISAPP SPECIFIC"
+    block: |
+      # custom aliases
+      alias disapp='/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/disapp_rosetta.pl --config-file {{ vault_subapp_vars.files.disapp.path }}'    # DisApp Alias
+      alias disapp_log='tail -f /var/log/rosetta/${HOSTNAME}/disapp/disapp.log'        # show last log entries in disapp.log
+      alias disapp_monitor='curl localhost:9003'
+
 - name: Add aliases for SubApp user
   ansible.builtin.blockinfile:
     path: "/home/{{ vault_subapp_user }}/.bash_aliases"
diff --git a/tasks/install_subapp.yml b/tasks/install_subapp.yml
index f703249..c979b2f 100644
--- a/tasks/install_subapp.yml
+++ b/tasks/install_subapp.yml
@@ -1,19 +1,29 @@
 ---
 - name: Berechtigungen für Blockdevice ".subapp" korrigieren
   ansible.builtin.file:
-    path: "/home/{{ vault_subapp_user }}/.subapp/"
+    path: "{{ item.path }}"
     state: directory
-    owner: "{{ vault_subapp_user }}"
-    group: "{{ vault_subapp_group }}"
+    owner: "{{ item.owner }}"
+    group: "{{ item.group }}"
     mode: "0750"
+  loop:
+    - path: "/home/{{ vault_disapp_user }}/.disapp/"
+      owner: "{{ vault_disapp_user }}"
+      group: "{{ vault_disapp_group }}"
+    - path: "/home/{{ vault_subapp_user }}/.subapp/"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
 
 - name: configure PIDfile directory creation
   ansible.builtin.template:
-    src: "usr/lib/tmpfiles.d/subapp-pid-dir.conf.j2"
-    dest: "/usr/lib/tmpfiles.d/subapp-pid-dir.conf"
+    src: "usr/lib/tmpfiles.d/{{ item }}.j2"
+    dest: "/usr/lib/tmpfiles.d/{{ item }}"
     owner: "root"
     group: "root"
     mode: "0644"
+  loop:
+    - "subapp-pid-dir.conf"
+    - "disapp-pid-dir.conf"
   notify: create PIDfiles
 
 # erst nach der Erstellung der User/Gruppen durchführen!
@@ -24,6 +34,7 @@
   ansible.builtin.apt:
     name:
       - 'submissionapplication4rosetta'
+      - "submission-application4rosetta"
       # - 'libio-async-perl'                 # offical Debian package
       # - 'libio-aio-perl'                   # offical Debian package
       # - 'libparallel-parallel-map-perl'    # packaged by SLUBArchiv.digital from CPAN
@@ -31,10 +42,14 @@
     state: absent
     autoclean: true
     autoremove: true
-- name: Submission Application installieren
+- name: Submission / Dissemination Application installieren
   ansible.builtin.apt:
-    name: "submission-application4rosetta"
-    state: present
+    name: [
+      "common-application4rosetta",
+      "dissemination-application4rosetta",
+      "submission-application4rosetta",
+    ]
+    state: latest
     allow_unauthenticated: "true"
 
 - name: Systemd-Unitfiles installieren (Templates)
@@ -43,8 +58,9 @@
     dest: "/etc/systemd/user/{{ item }}"
     owner: "{{ vault_subapp_vars.files.subapp.owner }}"
     group: "{{ vault_subapp_vars.files.subapp.group }}"
-    mode: "{{ vault_subapp_vars.files.subapp.mode }}"
+    mode: "{{ vault_subapp_vars.files.subapp.mode | default('0400') }}"
   loop:
+    - "disapp.service"
     - "subapp.service"
     - "webservice_status_SLUBarchiv.service"
   notify:
@@ -74,10 +90,11 @@
 - name: check which Services are enabled
   ansible.builtin.command: "systemctl is-enabled {{ item }}"
   loop:
-    - "webservice_status_SLUBarchiv.service"
-    - "subapp.service"
     - "chmod_sip_uploads.service"
     - "chown_dip_access.service"
+    - "disapp.service"
+    - "subapp.service"
+    - "webservice_status_SLUBarchiv.service"
   register: subapp_services_enabled
   changed_when: false
   failed_when:
@@ -128,13 +145,21 @@
         mode: "{{ vault_subapp_vars.files.blacklist.mode }}"
         state: touch
       when: not blacklist_file_exists.stat.exists
+    - name: write new DisApp config file
+      ansible.builtin.template:
+        src: "disapp.cfg.j2"
+        dest: "{{ vault_subapp_vars.files.disapp.path }}"
+        owner: "{{ vault_subapp_vars.files.disapp.owner }}"
+        group: "{{ vault_subapp_vars.files.disapp.group }}"
+        mode: "{{ vault_subapp_vars.files.disapp.mode | default('0400') }}"
+      with_items: "{{ vault_subapp_vars.hosts[ansible_hostname] | default(vault_subapp_vars.hosts['sdvlzasubappmoleculetest']) }}"
     - name: write new SubApp config file
       ansible.builtin.template:
         src: "subapp.cfg.j2"
         dest: "{{ vault_subapp_vars.files.subapp.path }}"
         owner: "{{ vault_subapp_vars.files.subapp.owner }}"
         group: "{{ vault_subapp_vars.files.subapp.group }}"
-        mode: "{{ vault_subapp_vars.files.subapp.mode }}"
+        mode: "{{ vault_subapp_vars.files.subapp.mode | default('0400') }}"
       with_items: "{{ vault_subapp_vars.hosts[ansible_hostname] | default(vault_subapp_vars.hosts['sdvlzasubappmoleculetest']) }}"
 
 - name: Quarantaeneverzeichnis & Lockverzeichnis anlegen
@@ -153,23 +178,52 @@
   ansible.builtin.file:
     src: "{{ item.src }}"
     dest: "{{ item.dest }}"
-    state: link
-    owner: "{{ vault_subapp_user }}"
-    group: "{{ vault_subapp_group }}"
-  with_items:
+    state: "{{ item.state | default('link') }}"
+    owner: "{{ item.owner }}"
+    group: "{{ item.group }}"
+  loop:
+    # SubApp
     - src: "{{ vault_subapp_vars.files.subapp.path }}"
       dest: "/home/{{ vault_subapp_user }}/.subapp.cfg"
-    - src: "/var/log/subapp/{{ ansible_hostname }}"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
+    - src: "/var/log/rosetta/{{ ansible_hostname }}/subapp/"
       dest: "/home/{{ vault_subapp_user }}/.subapp/{{ ansible_hostname }}"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
+    # DisApp
+    - src: "{{ vault_subapp_vars.files.disapp.path }}"
+      dest: "/home/{{ vault_disapp_user }}/.disapp.cfg"
+      owner: "{{ vault_disapp_user }}"
+      group: "{{ vault_disapp_group }}"
+    - src: "/var/log/rosetta/{{ ansible_hostname }}/disapp/"
+      dest: "/home/{{ vault_disapp_user }}/.disapp/{{ ansible_hostname }}"
+      owner: "{{ vault_disapp_user }}"
+      group: "{{ vault_disapp_group }}"
 
-# Bash-Completion einspielen
-- name: Konfiguration für Bash-Completion einspielen
-  ansible.builtin.copy:
-    src: "home/{{ vault_subapp_user }}/.bash_completion"
-    dest: "/home/{{ vault_subapp_user }}/.bash_completion"
-    owner: "{{ vault_subapp_user }}"
-    group: "{{ vault_subapp_group }}"
-    mode: "0644"
+# Bash-Completion funktioniert ab 2020.2 anders, s. Abschnitt AUTOCOMPLETION in perldoc bin/subapp_rosetta.pl und bin/disapp_rosetta.pl
+- name: Bash-Completion aktivieren (persistent; 'complete' in einer Shell-Task hätte keine dauerhafte Wirkung)
+  ansible.builtin.copy:
+    dest: "/etc/bash_completion.d/rosetta_apps"
+    owner: "root"
+    group: "root"
+    mode: "0644"
+    content: |
+      complete -C subapp_rosetta.pl subapp_rosetta.pl
+      complete -C disapp_rosetta.pl disapp_rosetta.pl
+
+- name: alte Bash-Completion entfernen
+  ansible.builtin.file:
+    path: "/home/{{ vault_subapp_user }}/.bash_completion"
+    state: absent
+
+# - name: Konfiguration für Bash-Completion einspielen
+#   ansible.builtin.copy:
+#     src: "home/{{ vault_subapp_user }}/.bash_completion"
+#     dest: "/home/{{ vault_subapp_user }}/.bash_completion"
+#     owner: "{{ vault_subapp_user }}"
+#     group: "{{ vault_subapp_group }}"
+#     mode: "0644"
 
 - name: alte Stichproben entfernen
   block:
diff --git a/tasks/install_ta_tools.yml b/tasks/install_ta_tools.yml
index 5a50886..63bbce2 100644
--- a/tasks/install_ta_tools.yml
+++ b/tasks/install_ta_tools.yml
@@ -16,7 +16,8 @@
   ansible.builtin.copy:
     src: "/tmp/tools-for-technical-analysts/{{ item.src }}"
     dest: "{{ item.dest }}"
-    mode: "0644"
+    mode: "{{ item.mode | default('0755') }}"
+    directory_mode: "0755"    # set this, or dirs below the dest directories may become untraversable!
     remote_src: true
   loop:
     - src: "bin/"
diff --git a/templates/disapp.cfg.j2 b/templates/disapp.cfg.j2
new file mode 100644
index 0000000..22c25e1
--- /dev/null
+++ b/templates/disapp.cfg.j2
@@ -0,0 +1,79 @@
+###########################
+### OPTIONAL PARAMETERS ###
+###########################
+
+
+
+### optional processing and log settings
+
+# Absolute path to a PID file
+pid_file:/run/disapp/disapp_bagit.pid
+# Log4perl log level
+logger_min_level:debug
+
+
+
+############################
+### MANDATORY PARAMETERS ###
+############################
+
+
+
+### internal working directories
+
+# Absolute path to the directory that is used for controlling, processing and storing exports from Rosetta.
+# Restore requests need to be put below directory_export/consumer_dir/.
+# Restored IEs can be found in directory_export/rosetta_export/.
+directory_export:/mnt/{{ ansible_hostname }}_access/
+# Absolute path to the directory which is used for placing lockfiles into for SIPs when their processing commences.
+directory_lock:/home/{{ vault_disapp_user }}/.disapp/lockdir/
+
+
+
+### user/group settings
+
+# Name of the Linux user that owns the subapp's access directory.
+owner_user_export_dir:{{ vault_disapp_user }}
+# Name of the Linux group that owns the subapp's access directory.
+owner_group_export_dir:import
+
+
+
+### Rosetta settings
+
+# FQDN of a) the host that the Rosetta application with the DEP role is running on or b) the load balancer that is in front of a Rosetta application cluster
+rosetta_host:{{ vault_subapp_vars.hosts[ansible_hostname].RosettaHost | default("ROSETTA_HOSTNAME_TEMPLATE") }}
+# FQDN of the host that the PDS authentication server is running on
+rosetta_pdshost:{{ vault_subapp_vars.hosts[ansible_hostname].PdsHost | default("PDS_HOSTNAME_TEMPLATE") }}
+# name of the institution in Rosetta that the subapp will ingest its SIPs into
+rosetta_institute:{{ vault_subapp_vars.hosts[ansible_hostname].Institute | default("INSTITUTE_NAME_TEMPLATE") }}
+# Material Flow ID of the Material Flow that will be used for processing SIPs in Rosetta
+rosetta_materialflowId:{{ vault_subapp_vars.hosts[ansible_hostname].MaterialFlowID | default("MATERIAL_FLOW_ID_TEMPLATE") }}
+# username of the user that the subapp will use for authentication against PDS/Rosetta
+rosetta_user:{{ vault_subapp_vars.hosts[ansible_hostname].User | default("SUBMISSION_APPLICATION_USER_TEMPLATE") }}
+# password of that user
+rosetta_password:{{ vault_subapp_vars.hosts[ansible_hostname].Rosetta_Password }}
+
+
+
+### processing configuration
+
+# Workflow name as agreed upon in the contract between producer and archive.
+fullname_workflow:{{ vault_subapp_vars.hosts[ansible_hostname].fullname_workflow | default("WORKFLOW_NAME_TEMPLATE") }}
+
+
+
+### email notification configuration
+
+# notification email address for consumers
+logger_consumer_email:{{ vault_subapp_vars.hosts[ansible_hostname].logger_producer_email | default("LOGGER_PRODUCER_EMAIL_TEMPLATE") }}
+# notification email address for archive staff (low level error information)
+logger_staff_email:{{ vault_subapp_vars.hosts[ansible_hostname].logger_staff_email | default("LOGGER_STAFF_EMAIL_TEMPLATE") }}
+
+
+
+### database configuration
+
+# Absolute path to SQLite database file for storing and loading message queues and SIP states
+# Hints: using /tmp is not allowed by SQLite, furthermore security requires the parent directory to be set to at least '750' (drwxr-x---)
+database_file:/home/{{ vault_disapp_user }}/.disapp/disapp.db
diff --git a/templates/etc/systemd/user/disapp.service.j2 b/templates/etc/systemd/user/disapp.service.j2
new file mode 100644
index 0000000..5786c0a
--- /dev/null
+++ b/templates/etc/systemd/user/disapp.service.j2
@@ -0,0 +1,59 @@
+[Unit]
+Description=SLUBArchiv Bagit-based Dissemination Application
+Documentation=man:disapp(7)
+After=remote-fs.target
+
+[Service]
+Type=forking
+Restart=no
+Environment="PERL5LIB=/usr/local/perl/"
+ExecStartPre=/bin/bash -c '\
+    BLOCKFILE="/home/{{ vault_disapp_user }}/.disapp/BLOCKFILE"; \
+    if [[ -e "$BLOCKFILE" ]]; then \
+        echo "Startup of DisApp is blocked by $BLOCKFILE."; \
+        cat $BLOCKFILE; \
+        exit 1; \
+    fi'
+ExecStart=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/disapp_rosetta.pl --config-file {{ vault_subapp_vars.files.disapp.path }} --start
+ExecStop=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/disapp_rosetta.pl --config-file {{ vault_subapp_vars.files.disapp.path }} --stop
+User={{ vault_disapp_user }}
+Group={{ vault_disapp_group }}
+# EXAMPLE: TimeoutSec=600
+TimeoutSec=infinity
+# DO NOT REMOVE!!! (based on SubApp Issue #68)
+# Do not kill any processes for a save shutdowns with a valid DisApp database.
+KillMode=none
+
+### Stability features
+# NOTE: requires a recent systemd; not supported on Debian 10.
+# documented at "man (5) systemd.service" and
+# https://www.freedesktop.org/software/systemd/man/systemd.service.html
+OOMPolicy=stop
+# documented at "man (5) systemd.exec" and
+# https://www.freedesktop.org/software/systemd/man/systemd.exec.html
+OOMScoreAdjust=-900
+
+### Security features
+# documented at "man (5) systemd.exec" and
+# https://www.freedesktop.org/software/systemd/man/systemd.exec.html
+# NOTE: these are active; systemd on Debian 10 did not support all of them.
+ProtectSystem=full
+ProtectHostname=true
+ProtectClock=true
+ProtectKernelTunables=true
+ProtectKernelModules=true
+ProtectKernelLogs=true
+ProtectControlGroups=true
+LockPersonality=true
+#MemoryDenyWriteExecute=true
+RestrictRealtime=true
+RestrictSUIDSGID=true
+## RemoveIPC=true
+## PrivateMounts=true
+## MountFlags=
+## SystemCallFilter is a Whitelist!!!
+#SystemCallFilter=@aio,@basic-io,@debug,@file-system,@network-io
+#SystemCallErrorNumber=1337
+
+[Install]
+WantedBy=multi-user.target
diff --git a/templates/etc/systemd/user/subapp.service.j2 b/templates/etc/systemd/user/subapp.service.j2
index 818d1a1..248fcb2 100644
--- a/templates/etc/systemd/user/subapp.service.j2
+++ b/templates/etc/systemd/user/subapp.service.j2
@@ -14,26 +14,8 @@ ExecStartPre=/bin/bash -c '\
         cat $BLOCKFILE; \
         exit 1; \
     fi'
-ExecStart=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_bagit.pl --config-file {{ vault_subapp_vars.files.subapp.path }} --start
-ExecStop=/bin/bash -c '\
-    PID=$(cat /run/subapp/subapp_bagit.pid); \
-    if [[ ! "$PID" =~ ^[0-9]+$ ]]; then \
-        echo "something broke, no valid PID for submission application daemon"; \
-        exit 1; \
-    fi; \
-    echo "sending --stop to submission application daemon" | systemd-cat -p info; \
-    /usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_bagit.pl --config-file {{ vault_subapp_vars.files.subapp.path }} --stop; \
-    echo " waiting for submission application daemon to finish" | systemd-cat -p info; \
-    while true; do \
-        ps -p $PID > /dev/null; \
-        if [[ $? != 0 ]]; then \
-            break; \
-        fi; \
-        echo "submission application daemon is still running, waiting another 5 seconds" | systemd-cat -p warning; \
-        sleep 5; \
-    done; \
-    echo "submission application daemon stopped" | systemd-cat -p info'
-
+ExecStart=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_rosetta.pl --config-file {{ vault_subapp_vars.files.subapp.path }} --start
+ExecStop=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_rosetta.pl --config-file {{ vault_subapp_vars.files.subapp.path }} --stop
 User={{ vault_subapp_user }}
 Group={{ vault_subapp_group }}
 # EXAMPLE: TimeoutSec=600
@@ -43,10 +25,9 @@ TimeoutSec=infinity
 KillMode=none
 
 ### Stability features
-# DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
 # documented at "man (5) systemd.service" and
 # https://www.freedesktop.org/software/systemd/man/systemd.service.html
-#OOMPolicy=stop
+OOMPolicy=stop
 # documented at "man (5) systemd.exec" and
 # https://www.freedesktop.org/software/systemd/man/systemd.exec.html
 OOMScoreAdjust=-900
@@ -54,19 +35,18 @@ OOMScoreAdjust=-900
 ### Security features
 # documented at "man (5) systemd.exec" and
 # https://www.freedesktop.org/software/systemd/man/systemd.exec.html
-# DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
-#ProtectSystem=strict
+ProtectSystem=full
 ## ProtectHome=read-only
-#ProtectHostname=true
-#ProtectClock=true
-#ProtectKernelTunables=true
-#ProtectKernelModules=true
-#ProtectKernelLogs=true
-#ProtectControlGroups=true
-#LockPersonality=true
+ProtectHostname=true
+ProtectClock=true
+ProtectKernelTunables=true
+ProtectKernelModules=true
+ProtectKernelLogs=true
+ProtectControlGroups=true
+LockPersonality=true
 #MemoryDenyWriteExecute=true
-#RestrictRealtime=true
-#RestrictSUIDSGID=true
+RestrictRealtime=true
+RestrictSUIDSGID=true
 ## RemoveIPC=true
 ## PrivateMounts=true
 ## MountFlags=
diff --git a/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2 b/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2
index 46d5609..18a408e 100644
--- a/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2
+++ b/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2
@@ -13,18 +13,18 @@ User={{ vault_subapp_user }}
 ### Security features
 # documented at https://www.freedesktop.org/software/systemd/man/systemd.exec.html
 # DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
-#ProtectSystem=strict
+ProtectSystem=full
 #ProtectHome=read-only
-#ProtectHostname=true
-#ProtectClock=true
-#ProtectKernelTunables=true
-#ProtectKernelModules=true
-#ProtectKernelLogs=true
-#ProtectControlGroups=true
-#LockPersonality=true
+ProtectHostname=true
+ProtectClock=true
+ProtectKernelTunables=true
+ProtectKernelModules=true
+ProtectKernelLogs=true
+ProtectControlGroups=true
+LockPersonality=true
 #MemoryDenyWriteExecute=true
-#RestrictRealtime=true
-#RestrictSUIDSGID=true
+RestrictRealtime=true
+RestrictSUIDSGID=true
 ## RemoveIPC=true
 ## PrivateMounts=true
 ## MountFlags=
diff --git a/templates/subapp.cfg.j2 b/templates/subapp.cfg.j2
index 9aa3754..70e6909 100644
--- a/templates/subapp.cfg.j2
+++ b/templates/subapp.cfg.j2
@@ -9,7 +9,7 @@
 # Absolute path to a file that contains a CSV list of SIPs that should be ignored by the subapp. One entry per line. You can create the file and leave it empty if you like; subapp will then act as if there was no blacklist at all and process all the SIPs.
 blacklist_sip_file:/home/{{ vault_subapp_user }}/.subapp/usa_blacklist_file.csv
 # Absolute path to a PID file
-pid_file:/run/subapp/subapp_bagit.pid
+pid_file:/run/subapp/subapp_rosetta.pid
 # Log4perl log level
 logger_min_level:debug
 # Blocking AIP updates
@@ -25,10 +25,6 @@ ingest_only:{{ vault_subapp_vars.hosts[ansible_hostname].ingest_only | default("
 
 ### internal working directories
 
-# Absolute path to the directory that is used for controlling, processing and storing exports from Rosetta.
-# Restore requests need to be put below directory_export/consumer_dir/.
-# Restored IEs can be found in directory_export/rosetta_export/.
-directory_export:/mnt/{{ ansible_hostname }}_access/
 # Absolute path to the directory which is used for placing lockfiles into for SIPs when their processing commences.
 directory_lock:/home/{{ vault_subapp_user }}/.subapp/lockdir/
 # Absolute path to the directory which contains symlinks to the SIPs that encountered errors during the processing and are put into quarantine. These SIPs will be ignored until the error is resolved.
@@ -50,8 +46,6 @@ owner_user_ingest_dir:{{ vault_subapp_user }}
 owner_group_import_dir:import
 # Name of the Linux group that owns the subapp's ingest directory.
 owner_group_ingest_dir:{{ vault_subapp_group }}
-# Name of the Linux group that owns the subapp's access directory.
-owner_group_export_dir:access
 
 
 
diff --git a/templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2 b/templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2
new file mode 100644
index 0000000..3a8de19
--- /dev/null
+++ b/templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2
@@ -0,0 +1 @@
+d /run/disapp 0750 {{ vault_disapp_user }} {{ vault_disapp_group }} -
-- 
GitLab