diff --git a/files/etc/profile.d/bash_functions_ingest.sh b/files/etc/profile.d/bash_functions_ingest.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b1d719a94ff1aea5c7bfb445f9e0c18154f397e1
--- /dev/null
+++ b/files/etc/profile.d/bash_functions_ingest.sh
@@ -0,0 +1,12 @@
+function create-sip () {
+	if [[ ! -d ~/submissionapplications4rosetta/ ]]; then
+		git clone --quiet --depth 10 \
+			https://git.slub-dresden.de/slub-digitalpreservation/submissionapplications4rosetta.git \
+			~/submissionapplications4rosetta
+	fi
+	cd ~/submissionapplications4rosetta || return 1
+	git pull
+	perl	-I ~/submissionapplications4rosetta/lib/ \
+		-I /usr/local/perl/ \
+		-e 'use Test::GeneratorSIPs; generate_unique_bagit_sip_in({ sip_type => "small", config => "/home/processing/.subapp.cfg" });'
+}
diff --git a/files/home/processing/.bash_completion b/files/home/processing/.bash_completion
deleted file mode 100644
index 016c0c927a508680d0c0677a98f3952cb6ac8998..0000000000000000000000000000000000000000
--- a/files/home/processing/.bash_completion
+++ /dev/null
@@ -1,49 +0,0 @@
-# installation help:
-#   1. copy into home directory of Submission Application user, e.g. /home/processing
-#   2. edit IMPORT_DIR (path to Submission Application Import directory)
-#   3. restart bash
-#   * requires bash-completion helper function _filedir, e.g. built in Debian
-#   * requires alias 'subapp' for execution of Submission Application,
-#     e.g. adding 'alias subapp='/usr/bin/perl -I /usr/local/perl /usr/local/bin/subapp_bagit.pl' to ~/.bashrc
-_pushd () {
-    command pushd "$@" > /dev/null
-}
-_popd () {
-    command popd "$@" > /dev/null
-}
-_subapp()
-{
-    IMPORT_DIR="/mnt/import"
-    local cur prev first opts
-    COMPREPLY=()
-    cur="${COMP_WORDS[COMP_CWORD]}"
-    prev="${COMP_WORDS[COMP_CWORD-1]}"
-    first="${COMP_WORDS[1]}"
-    opts="\
---help \
---man \
---config-file \
---single_run \
---reset_failed_preingest \
---force_restore_lza_id \
---permanent_report \
---start \
---status \
---stop \
---dismantle-orders"
-
-    case "$first" in
-        "--reset_failed_preingest")
-            _pushd "$IMPORT_DIR"
-            _filedir -d
-            _popd
-            return
-            ;;
-    esac
-
-    if [[ ${cur} == -* ]] ; then
-        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
-        return 0
-    fi
-}
-complete -F _subapp subapp
diff --git a/files/usr/local/bin/move_old_logs.sh b/files/usr/local/bin/move_old_logs.sh
index 09d0d43383e7bed0b0c1308ec6dedab73ad6d4b4..e47adc7d91ad5c8a898adea0756e1cd495054606 100644
--- a/files/usr/local/bin/move_old_logs.sh
+++ b/files/usr/local/bin/move_old_logs.sh
@@ -8,18 +8,54 @@ START_YEAR="2015"
 CURRENT_YEAR="$( date +%Y )"
 PREVIOUS_YEAR="$(( CURRENT_YEAR - 1 ))"
 
-cd "/var/log/subapp/${HOSTNAME}/" || exit 1
+[[ -n ${1} ]] && APP="${1}"
+# shellcheck disable=SC2016
+# ...because we don't want the expansion to happen in the log message
+[[ "${APP}" != "disapp" && "${APP}" != "subapp" && "${APP}" != "subapp_webservice" ]] && \
+	{ echo 'ERROR: $1 needs to be one of "disapp", "subapp" or "subapp_webservice"' >&2; exit 1; }
 
-# create directories for old logfiles
-for YEAR in $( seq ${START_YEAR} ${PREVIOUS_YEAR} ); do
-	mkdir -p "old/${YEAR}"
-done
+cd "/var/log/${APP}/" || exit 1
+
+# Initially, this script will ALWAYS be called by root. This is because we have
+# to switch users depending on the workflow that we're working with. Also, we
+# HAVE to switch to a non-root user, because the NFS-share prohibits write
+# operations from the root user.
+# To solve this, we check if the script is run by root, and if it is, we run
+# the script again with the correct user by calling `exec`.
+# Once `exec` is called, the script terminates and is called again with the
+# new user, which makes the UID check skip and executes the move operations
+# below.
+if [ $UID -eq 0 ]; then
+	if [[ "${APP}" == "disapp" ]]; then
+		exec su "access" "$0" "$@"
+		# nothing will be executed beyond that line,
+		# because exec replaces running process with the new one
+	fi
+	if [[ "${APP}" == "subapp" ]]; then
+		exec su "processing" "$0" "$@"
+		# nothing will be executed beyond that line,
+		# because exec replaces running process with the new one
+	fi
+	if [[ "${APP}" == "subapp_webservice" ]]; then
+		exec su "processing" "$0" "$@"
+		# nothing will be executed beyond that line,
+		# because exec replaces running process with the new one
+	fi
+fi
 
-# move all old logfiles
+# Execution resumes here, if we're a non-root user.
+cd "/var/log/${APP}/" || exit 1
 for YEAR in $( seq ${START_YEAR} ${PREVIOUS_YEAR} ); do
+	mkdir -p "old/${YEAR}";
+	# DISapp
+	if [[ -n $( find ./ -maxdepth 1 -name "disapp.log.${YEAR}-*.lz" ) ]]; then mv disapp.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+	# SUBapp
 	if [[ -n $( find ./ -maxdepth 1 -name "Protokoll_SLUBArchiv_Erfolgreich-${YEAR}*.log" ) ]]; then mv Protokoll_SLUBArchiv_Erfolgreich-${YEAR}*.log "old/${YEAR}/"; fi
 	if [[ -n $( find ./ -maxdepth 1 -name "Protokoll_SLUBArchiv_FEHLER-${YEAR}*.log" ) ]]; then mv Protokoll_SLUBArchiv_FEHLER-${YEAR}*.log "old/${YEAR}/"; fi
 	if [[ -n $( find ./ -maxdepth 1 -name "sips.log.${YEAR}-*.lz" ) ]]; then mv sips.log.${YEAR}-*.lz "old/${YEAR}/"; fi
 	if [[ -n $( find ./ -maxdepth 1 -name "subapp.log.${YEAR}-*.lz" ) ]]; then mv subapp.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+	if [[ -n $( find ./ -maxdepth 1 -name "producer_mails.log.${YEAR}-*.lz" ) ]]; then mv producer_mails.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+	if [[ -n $( find ./ -maxdepth 1 -name "staff_mails.log.${YEAR}-*.lz" ) ]]; then mv staff_mails.log.${YEAR}-*.lz "old/${YEAR}/"; fi
+	# SUBapp Webservice
 	if [[ -n $( find ./ -maxdepth 1 -name "webservice.log.${YEAR}-*.lz" ) ]]; then mv webservice.log.${YEAR}-*.lz "old/${YEAR}/"; fi
 done
diff --git a/files/etc/systemd/user/move_old_logs.timer b/files/usr/local/lib/systemd/system/move_old_logs.timer
similarity index 63%
rename from files/etc/systemd/user/move_old_logs.timer
rename to files/usr/local/lib/systemd/system/move_old_logs.timer
index 653982216f37bda17ef0200ae4fdd3fc0544a920..7969f7e06007047fece0e9feabdaaa257866c8df 100644
--- a/files/etc/systemd/user/move_old_logs.timer
+++ b/files/usr/local/lib/systemd/system/move_old_logs.timer
@@ -1,10 +1,10 @@
 [Unit]
-Description=timer for move_old_logs.service
+Description=timer for move_old_logs_@.service
 
 [Timer]
 # run once a year, on the 1st of January at 05:00:00am.
 OnCalendar=*-1-1 05:00:00
-Unit=move_old_logs.service
+Unit=move_old_logs_@subapp.service
 
 [Install]
 WantedBy=default.target
diff --git a/meta/main.yml b/meta/main.yml
index 3503473ddfeff656a7eb814e5091c3c7a220676f..0ec57be17fbee280cd68957edb1db85781d5b1b2 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -18,7 +18,6 @@ galaxy_info:
     # If this a Container Enabled role, provide the minimum Ansible Container version. min_ansible_container_version: Optionally specify the branch Galaxy will use when accessing the GitHub repo
     # for this role. During role install, if no tags are available, Galaxy will use this branch. During import Galaxy will access files on this branch. If Travis integration is configured, only
     # notifications for this branch will be accepted. Otherwise, in all cases, the repo's default branch (usually master) will be used. github_branch:
-  namespace: "slub"
   # Provide a list of supported platforms, and for each platform a list of versions. If you don't wish to enumerate all versions for a particular platform, use 'all'. To view available
   # platforms and versions (or releases), visit: https://galaxy.ansible.com/api/v1/platforms/
   #
diff --git a/molecule/resources/playbooks/prepare.yml b/molecule/resources/playbooks/prepare.yml
index 0168634943b961904f68d89b16719aaa104dfcaa..77c4fdfdea16d96375a06ee059bd1226a2132a6c 100644
--- a/molecule/resources/playbooks/prepare.yml
+++ b/molecule/resources/playbooks/prepare.yml
@@ -16,12 +16,14 @@
       become: true
     - name: add GPG key for SLUB Debian repository
       ansible.builtin.apt_key:
-        url: "https://sdvdebianrepo.slub-dresden.de/deb-repository/pub.gpg.key"
+        # url: "https://sdvdebianrepo.slub-dresden.de/deb-repository/pub.gpg.key"
+        url: "http://bdv141.slub-dresden.de/deb-repository/pub.gpg.key"
         state: present
       become: true
     - name: add repo URL to sources.list
       ansible.builtin.apt_repository:
-        repo: "deb https://sdvdebianrepo.slub-dresden.de/deb-repository bullseye main"
+        # repo: "deb https://sdvdebianrepo.slub-dresden.de/deb-repository bullseye main"
+        repo: "deb http://bdv141.slub-dresden.de/deb-repository lza-testing main"
         state: present
         update_cache: true
         mode: "0644"
diff --git a/tasks/configure_bash.yml b/tasks/configure_bash.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3bcec2a2090272e895f35e416e0e8c6f8a58210c
--- /dev/null
+++ b/tasks/configure_bash.yml
@@ -0,0 +1,20 @@
+---
+- name: add more Bash aliases
+  ansible.builtin.blockinfile:
+    path: "/etc/profile.d/bash_aliases.sh"
+    create: true
+    owner: "root"
+    group: "root"
+    mode: "0644"
+    marker: "# {mark} ANSIBLE MANAGED BLOCK - Subapp/DisApp all users"
+    block: |
+      alias subapp-version="dpkg -l | grep 'application4rosetta'"
+      alias disapp-version="dpkg -l | grep 'application4rosetta'"
+
+- name: add Bash functions
+  ansible.builtin.copy:
+    src: "etc/profile.d/bash_functions_ingest.sh"
+    dest: "/etc/profile.d/bash_functions_ingest.sh"
+    mode: "0644"
+    owner: "root"
+    group: "root"
diff --git a/tasks/configure_nfs_mounts.yml b/tasks/configure_nfs_mounts.yml
index ca8eaa05b22ec96c0106318bb6b99da6b12807de..359c4c1b5fd5f8ac357b25a9ad8b640abbaac8db 100644
--- a/tasks/configure_nfs_mounts.yml
+++ b/tasks/configure_nfs_mounts.yml
@@ -13,6 +13,9 @@
     - "{{ nfs_mounts_subapp.hosts[ansible_hostname]['import']['path'] | default('/mnt/import') }}"
     - "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['path'] | default('/home/import/upload') }}"
     - "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['path'] | default('/home/import/download') }}"
+    - "{{ paths.log_disapp.mountpoint }}"
+    - "{{ paths.log_subapp.mountpoint }}"
+    - "{{ paths.log_subapp_ws.mountpoint }}"
   register: stat_result
 - name: if dir doesn't exist, create it with correct permissions
   ansible.builtin.file:
@@ -24,45 +27,70 @@
   loop: "{{ stat_result.results }}"
   when: not item.stat.exists
 
-- name: Mounts für SubApp-Shares & Logs
+- name: Mounts für SubApp-Shares & Logs NEW
   ansible.posix.mount:
     path: "{{ item.path }}"
-    src: "{{ item.src }}"
+    src: "{{ item.src | default(omit) }}"
     state: "{{ item.state | default('mounted') }}"
-    fstype: nfs
+    fstype: "{{ item.fstype | default('nfs') }}"
     opts: "{{ item.opts | default( nfs_opts.v3 ) }}"
-  with_items:
-    - path: "/var/log/subapp/{{ ansible_hostname }}"
-      src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log']['nfs_share'] }}{{ ansible_hostname }}"
+  loop:
+    # common Log  - use this once the migration to the separated dis-/subapp is finished
+    - path: "/mnt/logs/"
+      src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log']['nfs_share'] }}{{ ansible_hostname }}/"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['log']['nfs_opts'] }}"
-    - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['access']['path'] | default('/mnt/' + ansible_hostname + '_access') }}"
+    # DisApp
+    - path: "{{ paths.access.mountpoint }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['access']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['access']['nfs_opts'] }}"
-    - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['ingest']['path'] | default('/mnt/' + ansible_hostname + '_ingest') }}"
+    # SubApp
+    - path: "{{ paths.ingest.mountpoint }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['ingest']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['ingest']['nfs_opts'] }}"
-    - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['import']['path'] | default('/mnt/import') }}"
+    - path: "{{ paths.import.mountpoint }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['import']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['import']['nfs_opts'] }}"
-    - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['path'] | default('/home/import/upload') }}"
+    - path: "{{ paths.sftp_upload.mountpoint }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_upload']['nfs_opts'] }}"
   tags: [notest]
-- name: create subdirectory in Share before mounting it...
+- name: create subdirectories in Shares before mounting them...
   ansible.builtin.file:
-    path: "{{ paths.access.mountpoint }}/consumer_dir"
+    path: "{{ item.path }}"
     state: directory
-    mode: "0755"
-  tags: [notest]
-- name: ... and now mount it
+    mode: "{{ item.mode | default('0755') }}"
+  loop:
+    - path: "{{ paths.access.mountpoint }}/consumer_dir"
+      mode: "0770"
+    - path: "/mnt/logs/disapp/"
+    - path: "/mnt/logs/subapp/"
+    - path: "/mnt/logs/subapp_ws/"
+  # tags: [notest]
+- name: ... and now mount them
   ansible.posix.mount:
     path: "{{ item.path }}"
     src: "{{ item.src }}"
     state: "{{ item.state | default('mounted') }}"
-    fstype: nfs
+    fstype: "{{ item.fstype | default('nfs') }}"
     opts: "{{ item.opts | default( nfs_opts.v3 ) }}"
-  with_items:
-    - path: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['path'] | default('/home/import/download') }}"
+  loop:
+    # DisApp
+    - path: "{{ paths.log_disapp.mountpoint }}"
+      src: "/mnt/logs/disapp/"
+      opts: "bind,_netdev,x-systemd.requires-mounts-for=/mnt/logs/"
+      fstype: none
+    # SubApp
+    - path: "{{ paths.log_subapp.mountpoint }}"
+      src: "/mnt/logs/subapp/"
+      opts: "bind,_netdev,x-systemd.requires-mounts-for=/mnt/logs/"
+      fstype: none
+    # SubApp-Webservice
+    - path: "{{ paths.log_subapp_ws.mountpoint }}"
+      src: "/mnt/logs/subapp_ws/"
+      opts: "bind,_netdev,x-systemd.requires-mounts-for=/mnt/logs/"
+      fstype: none
+    # SFTP
+    - path: "{{ paths.sftp_download.mountpoint }}"
       src: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['nfs_share'] }}"
       opts: "{{ nfs_mounts_subapp.hosts[ansible_hostname]['sftp_download']['nfs_opts'] }}"
   tags: [notest]
@@ -75,11 +103,19 @@
     group: "{{ item.group | default(omit) }}"
     mode: "{{ item.mode | default('0770') }}"
     state: "{{ item.state | default('directory') }}"
-  with_items:
-    - path: "{{ paths.log.mountpoint }}"
-      owner: "{{ paths.log.owner }}"
-      group: "{{ paths.log.group }}"
-      mode: "{{ paths.log.mode }}"
+  loop:
+    - path: "{{ paths.log_disapp.mountpoint }}"
+      owner: "{{ paths.log_disapp.owner }}"
+      group: "{{ paths.log_disapp.group }}"
+      mode: "{{ paths.log_disapp.mode }}"
+    - path: "{{ paths.log_subapp.mountpoint }}"
+      owner: "{{ paths.log_subapp.owner }}"
+      group: "{{ paths.log_subapp.group }}"
+      mode: "{{ paths.log_subapp.mode }}"
+    - path: "{{ paths.log_subapp_ws.mountpoint }}"
+      owner: "{{ paths.log_subapp_ws.owner }}"
+      group: "{{ paths.log_subapp_ws.group }}"
+      mode: "{{ paths.log_subapp_ws.mode }}"
     - path: "{{ paths.access.mountpoint }}"
       owner: "{{ paths.access.owner }}"
       group: "{{ paths.access.group }}"
@@ -93,6 +129,8 @@
       group: "{{ paths.access.group }}"
       mode: "{{ paths.access.mode }}"
     - path: "{{ paths.sftp_download.mountpoint }}"
+      owner: "{{ paths.sftp_download.owner }}"
+      group: "{{ paths.sftp_download.group }}"
       mode: "{{ paths.sftp_download.mode }}"
     - path: "{{ paths.ingest.mountpoint }}"
       owner: "{{ paths.ingest.owner }}"
diff --git a/tasks/configure_processing_user.yml b/tasks/configure_processing_user.yml
index ec534c9e064cc74e5e9a21e6940af9f651219e91..9a5b7a63425f9c671641b90af943ce0ce65ff261 100644
--- a/tasks/configure_processing_user.yml
+++ b/tasks/configure_processing_user.yml
@@ -8,9 +8,25 @@
   ansible.builtin.copy:
     remote_src: true
     src: "/etc/skel/.vimrc"
-    dest: "/home/{{ vault_subapp_user }}/.vimrc"
+    dest: "/home/{{ item }}/.vimrc"
     mode: "0644"
   when: vimrc_skel.stat.exists
+  loop:
+    - "{{ vault_disapp_user }}"
+    - "{{ vault_subapp_user }}"
+
+- name: configure .bashrc for DisApp user
+  ansible.builtin.blockinfile:
+    path: "/home/{{ vault_disapp_user }}/.bashrc"
+    backup: "no"
+    create: "yes"
+    owner: "{{ vault_disapp_user }}"
+    group: "{{ vault_disapp_group }}"
+    mode: "0644"
+    marker: "# {mark} ANSIBLE MANAGED BLOCK - DisApp-specific"
+    state: present
+    block: |
+      cd ~
 
 - name: configure .bashrc for SubApp user
   ansible.builtin.blockinfile:
@@ -27,7 +43,7 @@
         if [[ $1 != "" ]]; then YEAR="$1"
         else YEAR="$( date +%Y )"
         fi
-        LOG="/var/log/subapp/$(hostname)/staff_mails.log"
+        LOG="/var/log/subapp/staff_mails.log"
         ( if [ -e ${LOG} ]; then
             cat ${LOG}
           fi
@@ -50,6 +66,22 @@
 
       cd ~
 
+- name: Add aliases for DisApp user
+  ansible.builtin.blockinfile:
+    path: "/home/{{ vault_disapp_user }}/.bash_aliases"
+    backup: "no"
+    create: "yes"
+    owner: "{{ vault_disapp_user }}"
+    group: "{{ vault_disapp_group }}"
+    mode: "0644"
+    state: present
+    marker: "# {mark} ANSIBLE MANAGED BLOCK - DISAPP SPECIFIC"
+    block: |
+      # custom aliases
+      alias disapp='/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/disapp_rosetta.pl --config-file {{ vault_subapp_vars.files.disapp.path }}'    # DisApp Alias
+      alias disapp_log='tail -f /var/log/disapp/disapp.log'        # show last log entries in disapp.log
+      alias disapp_monitor='nc localhost 9003'
+
 - name: Add aliases for SubApp user
   ansible.builtin.blockinfile:
     path: "/home/{{ vault_subapp_user }}/.bash_aliases"
@@ -62,16 +94,18 @@
     marker: "# {mark} ANSIBLE MANAGED BLOCK - SUBAPP SPECIFIC"
     block: |
       # custom aliases - user processing
-      alias subapp='/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_bagit.pl --config-file {{ vault_subapp_vars.files.subapp.path }}'    # SubApp alias
+      alias subapp='/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_rosetta.pl --config-file {{ vault_subapp_vars.files.subapp.path }}'    # SubApp alias
       alias sips_number='find /mnt/import/ -maxdepth 1 -type d | wc -l'        # count directories in /mnt/import/
-      alias sips_log='tail -f /var/log/subapp/${HOSTNAME}/sips.log | grep "what:.*,"'            # show last log entries in sips.log
-      alias subapp_log='tail -f /var/log/subapp/${HOSTNAME}/subapp.log'        # show last log entries in subapp.log
-      alias subapp_monitor='curl localhost:9001'
+      alias sips_log='tail -f /var/log/subapp/sips.log | grep "what:.*,"'            # show last log entries in sips.log
+      alias subapp_log='tail -f /var/log/subapp/subapp.log'        # show last log entries in subapp.log
+      alias subapp_monitor='nc localhost 9001'
       alias verify_bag='perl -e '\''use Archive::BagIt; my $bag = Archive::BagIt->new(".")->verify_bag( {report_all_errors => 1} );'\''; echo $?'
       alias block_subapp='echo "SubApp must not run, startup is blocked manually by an administrator. This file is here for a reason! Only remove it after consultation!" > ~/.subapp/BLOCKFILE'
       alias unblock_subapp='rm -f ~/.subapp/BLOCKFILE'
-      alias list_quarantine='for sip in $(ls -t ~/.subapp/quarantine); do echo "$sip# => $(head -n 1 ~/.subapp/quarantine/$sip/sip.ERROR), $(head -n 3 ~/.subapp/quarantine/$sip/sip.ERROR | tail -n 1)"; done | column -s "#" -t'
-      alias list_quarantine_full='for sip in $(ls -t ~/.subapp/quarantine); do echo -e "$sip# ($( stat -c %y ~/.subapp/quarantine/$sip/sip.ERROR ))# => $(cat ~/.subapp/quarantine/$sip/sip.ERROR)\n===\n"; done | column -s "#" -t'
-      alias quarantine_summary='echo -e "$( ls ~/.subapp/quarantine/ | wc -l ) SIPs in quarantine.\n"; for sip in $(ls -t ~/.subapp/quarantine/); do echo "$( du -sh /mnt/import/${sip} ),# $( stat -c %y ~/.subapp/quarantine/$sip/sip.ERROR ),# $( find /mnt/import/${sip}/ -type f | wc -l) files"; done | column -s "#" -t'
-      alias quarantine_size='sum=0; for dir in $(find ~/.subapp/quarantine/ -name "sip.bagit"); do if [[ -d $(realpath ${dir}) ]]; then sum=$((sum+$(du -s $(realpath ${dir}) | cut -f1))); else echo "broken Q-Link: ${dir}"; fi; done; echo "$((sum/1024)) MiB in quarantine."'
-      alias next='/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_bagit.pl --config-file /home/processing/.subapp/subapp.cfg --single_run; echo "################# LOG #####################"; echo "last 10 lines..."; tail -n 10 /var/log/subapp/${HOSTNAME}/sips.log; echo "################# PROBLEMS #####################"; START=$( grep -n "starting..." /var/log/subapp/${HOSTNAME}/subapp.log | tail -1 | cut -f1 -d: ); ALL_LINES=$( wc -l < /var/log/subapp/${HOSTNAME}/subapp.log ); let LINES=$ALL_LINES-$START; echo "checking last $LINES log lines..."; tail -n $LINES /var/log/subapp/${HOSTNAME}/subapp.log | grep --color -e "\[ERROR\]" -e "\[FATAL\]" -e "\[WARN\]"'
+
+      alias list_quarantine='readarray -d "" sips < <(find ~/.subapp/quarantine -mindepth 1 -maxdepth 1 -type d -print0); for sip in "${sips[@]}"; do echo "$sip# => $(head -n 1 "$sip/sip.ERROR"),$(head -n 3 "$sip/sip.ERROR" | tail -n 1)"; done | column -s "#" -t'
+      alias list_quarantine_full='readarray -d "" sips < <(find ~/.subapp/quarantine -mindepth 1 -maxdepth 1 -type d -print0); for sip in "${sips[@]}"; do echo -e "$sip# ($( stat -c %y "$sip/sip.ERROR" ))# => $(cat "$sip/sip.ERROR")\n===\n"; done | column -s "#" -t'
+      alias quarantine_size='sum=0; readarray -d "" sips < <(find ~/.subapp/quarantine/ -name "sip.bagit" -print0); for dir in "${sips[@]}"; do if [[ -d "$(realpath "${dir}")" ]]; then sum=$((sum+$(du -s "$(realpath "${dir}")" | cut -f1))); else echo "broken Q-Link: ${dir}"; fi; done; echo "$((sum/1024)) MiB in quarantine."'
+      alias quarantine_summary='echo -e "$( ls ~/.subapp/quarantine/ | wc -l ) SIPs in quarantine.\n"; readarray -d "" sips < <(find ~/.subapp/quarantine -mindepth 1 -maxdepth 1 -type d -print0); for sip in "${sips[@]}"; do sip="$(basename "${sip}")"; echo "$( du -sh "/mnt/import/${sip}" ),# $( stat -c %y "${HOME}/.subapp/quarantine/${sip}/sip.ERROR" ),# $( find "/mnt/import/${sip}/" -type f | wc -l) files"; done | column -s "#" -t'
+
+      alias next='/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_rosetta.pl --config-file /home/processing/.subapp/subapp.cfg --single_run; echo "################# LOG #####################"; echo "last 10 lines..."; tail -n 10 /var/log/subapp/sips.log; echo "################# PROBLEMS #####################"; START=$( grep -n "starting..." /var/log/subapp/subapp.log | tail -1 | cut -f1 -d: ); ALL_LINES=$( wc -l < /var/log/subapp/subapp.log ); let LINES=$ALL_LINES-$START; echo "checking last $LINES log lines..."; tail -n $LINES /var/log/subapp/subapp.log | grep --color -e "\[ERROR\]" -e "\[FATAL\]" -e "\[WARN\]"'
diff --git a/tasks/install_checkmk_plugins.yml b/tasks/install_checkmk_plugins.yml
index 52f9bbb8cba026a974dd2860bf47d94a45d708f9..bd8d940c7ad4cd5d048a96d103640322b9f25d68 100644
--- a/tasks/install_checkmk_plugins.yml
+++ b/tasks/install_checkmk_plugins.yml
@@ -18,4 +18,5 @@
     group: "root"
     mode: "0750"
   loop:
+    - "check_subapp_quarantine.sh"
     - "check_subapp_ws_status.sh"
diff --git a/tasks/install_move_logs.yml b/tasks/install_move_logs.yml
index 524dbb375b5dc4ae636db84131838947c60f647e..b0e7266be2e27e667d7092a43a27b070304e46ef 100644
--- a/tasks/install_move_logs.yml
+++ b/tasks/install_move_logs.yml
@@ -1,4 +1,10 @@
 ---
+- name: create SystemD unit directory
+  ansible.builtin.file:
+    path: "/usr/local/lib/systemd/system/"
+    state: directory
+    mode: "0755"
+
 - name: install timer and script for moving old logs to archive
   ansible.builtin.copy:
     src: "{{ item.path }}"
@@ -7,7 +13,7 @@
     owner: "{{ item.owner | default('root') }}"
     group: "{{ item.group | default('root') }}"
   loop:
-    - path: "etc/systemd/user/move_old_logs.timer"
+    - path: "usr/local/lib/systemd/system/move_old_logs.timer"
       mode: "0644"
     - path: "usr/local/bin/move_old_logs.sh"
       mode: "0755"
@@ -15,37 +21,25 @@
 
 - name: install service for moving old logs to archive
   ansible.builtin.template:
-    src: "etc/systemd/user/move_old_logs.service.j2"
-    dest: "/etc/systemd/user/move_old_logs.service"
-    mode: "0755"
+    src: "usr/local/lib/systemd/system/move_old_logs_@.service.j2"
+    dest: "/usr/local/lib/systemd/system/move_old_logs_@.service"
+    mode: "0644"
     owner: "root"
     group: "root"
+  notify: daemon-reload
 
-- name: find move_old_logs systemd units so we don't have to hardcode their names in the loops
-  ansible.builtin.find:
-    path: "/etc/systemd/user/"
-    pattern: "move_old_logs.*"
-  register: move_old_logs_units
-
-- name: check if move_old_logs units are already enabled
-  ansible.builtin.command: "systemctl is-enabled {{ item.path | basename }}"
-  loop: "{{ move_old_logs_units.files }}"
-  register: move_old_logs_enabled
-  changed_when: false
-  failed_when:
-    - move_old_logs_enabled.stdout != "enabled"
-    - move_old_logs_enabled.stdout != "disabled"
-    - '"No such file or directory" not in move_old_logs_enabled.stderr'
-
-- name: manually enable move_old_logs.service, because it cannot be found by the ansible.builtin.systemd module when the timer is located below "/etc/systemd/user/"
-  ansible.builtin.command: "systemctl enable {{ item.item.path }}"
-  loop: "{{ move_old_logs_enabled.results }}"
-  when:
-    - item.stdout != "enabled"
-  register: move_old_logs_enablecmd
-  changed_when: move_old_logs_enablecmd.stdout in "Created symlink"
+- name: enable move_old_logs services
+  ansible.builtin.systemd:
+    name: "move_old_logs_@{{ item }}.service"
+    daemon_reload: true
+    enabled: true
+  loop:
+    - "disapp"
+    - "subapp"
+    - "subapp_webservice"
 
 - name: start timer for moving old logs to archive (the service is triggered by the timer and doesn't need to be started separately)
   ansible.builtin.systemd:
     name: "move_old_logs.timer"
+    enabled: true
     state: started
diff --git a/tasks/install_packages.yml b/tasks/install_packages.yml
index e2c4af02456bc3c2bc579bfd3df8a183aeac6bf6..11200240d1456214f724b91032f58b397fa3dc62 100644
--- a/tasks/install_packages.yml
+++ b/tasks/install_packages.yml
@@ -10,6 +10,7 @@
       'libxalan-c112',
       'libxerces-c-dev',
       'libxml2-utils',
+      'netcat-openbsd',
       'p7zip-full',
       'rsync',
       'sqlite3',
diff --git a/tasks/install_subapp.yml b/tasks/install_subapp.yml
index f703249bba94a222790c60c9474f4ac91c459cad..2e0d3400b48943e7342eb6eda1e24a520bc07d26 100644
--- a/tasks/install_subapp.yml
+++ b/tasks/install_subapp.yml
@@ -1,19 +1,29 @@
 ---
 - name: Berechtigungen für Blockdevice ".subapp" korrigieren
   ansible.builtin.file:
-    path: "/home/{{ vault_subapp_user }}/.subapp/"
+    path: "{{ item.path }}"
     state: directory
-    owner: "{{ vault_subapp_user }}"
-    group: "{{ vault_subapp_group }}"
+    owner: "{{ item.owner }}"
+    group: "{{ item.group }}"
     mode: "0750"
+  loop:
+    - path: "/home/{{ vault_disapp_user }}/.disapp/"
+      owner: "{{ vault_disapp_user }}"
+      group: "{{ vault_disapp_group }}"
+    - path: "/home/{{ vault_subapp_user }}/.subapp/"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
 
 - name: configure PIDfile directory creation
   ansible.builtin.template:
-    src: "usr/lib/tmpfiles.d/subapp-pid-dir.conf.j2"
-    dest: "/usr/lib/tmpfiles.d/subapp-pid-dir.conf"
+    src: "usr/lib/tmpfiles.d/{{ item }}.j2"
+    dest: "/usr/lib/tmpfiles.d/{{ item }}"
     owner: "root"
     group: "root"
     mode: "0644"
+  loop:
+    - "subapp-pid-dir.conf"
+    - "disapp-pid-dir.conf"
   notify: create PIDfiles
 
 # erst nach der Erstellung der User/Gruppen durchführen!
@@ -24,6 +34,7 @@
   ansible.builtin.apt:
     name:
       - 'submissionapplication4rosetta'
+      - "submission-application4rosetta"
       # - 'libio-async-perl'                 # offical Debian package
       # - 'libio-aio-perl'                   # offical Debian package
       # - 'libparallel-parallel-map-perl'    # packaged by SLUBArchiv.digital from CPAN
@@ -31,11 +42,17 @@
     state: absent
     autoclean: true
     autoremove: true
-- name: Submission Application installieren
+  changed_when: false     # this is only needed for the migration to RC2022.2 and won't be needed afterwards
+- name: Submission / Dissemination Application installieren
   ansible.builtin.apt:
-    name: "submission-application4rosetta"
-    state: present
+    name: [
+      "common-application4rosetta",
+      "dissemination-application4rosetta",
+      "submission-application4rosetta",
+    ]
+    state: latest
     allow_unauthenticated: "true"
+  changed_when: false     # this is only needed for the migration to RC2022.2 and won't be needed afterwards
 
 - name: Systemd-Unitfiles installieren (Templates)
   ansible.builtin.template:
@@ -43,8 +60,9 @@
     dest: "/etc/systemd/user/{{ item }}"
     owner: "{{ vault_subapp_vars.files.subapp.owner }}"
     group: "{{ vault_subapp_vars.files.subapp.group }}"
-    mode: "{{ vault_subapp_vars.files.subapp.mode }}"
+    mode: "0644"
   loop:
+    - "disapp.service"
     - "subapp.service"
     - "webservice_status_SLUBarchiv.service"
   notify:
@@ -74,10 +92,11 @@
 - name: check which Services are enabled
   ansible.builtin.command: "systemctl is-enabled {{ item }}"
   loop:
-    - "webservice_status_SLUBarchiv.service"
-    - "subapp.service"
     - "chmod_sip_uploads.service"
     - "chown_dip_access.service"
+    - "disapp.service"
+    - "subapp.service"
+    - "webservice_status_SLUBarchiv.service"
   register: subapp_services_enabled
   changed_when: false
   failed_when:
@@ -96,6 +115,12 @@
   ansible.builtin.apt:
     name: "git"
     state: latest
+# "msg": "Failed to set a new url https://git.slub-dresden.de/digital-preservation/significantproperties.git for origin:  fatal: detected dubious ownership in repository at '/usr/local/share/significantproperties'\nTo add an exception for this directory, call:\n\n\tgit config --global --add safe.directory /usr/local/share/significantproperties\n"
+- name: git konfigurieren (muss gesetzt sein, sonst wirft der nächste Task den Fehler 'detected dubious ownership in repository')
+  community.general.git_config:
+    name: "safe.directory"
+    scope: "global"
+    value: "/usr/local/share/significantproperties"
 - name: Config für Signifikante Eigenschaften einspielen
   ansible.builtin.git:
     repo: "https://git.slub-dresden.de/digital-preservation/significantproperties.git"
@@ -128,13 +153,21 @@
         mode: "{{ vault_subapp_vars.files.blacklist.mode }}"
         state: touch
       when: not blacklist_file_exists.stat.exists
+    - name: write new DisApp config file
+      ansible.builtin.template:
+        src: "disapp.cfg.j2"
+        dest: "{{ vault_subapp_vars.files.disapp.path }}"
+        owner: "{{ vault_subapp_vars.files.disapp.owner }}"
+        group: "{{ vault_subapp_vars.files.disapp.group }}"
+        mode: "{{ vault_subapp_vars.files.disapp.mode | default('0400') }}"
+      with_items: "{{ vault_subapp_vars.hosts[ansible_hostname] | default(vault_subapp_vars.hosts['sdvlzasubappmoleculetest']) }}"
     - name: write new SubApp config file
       ansible.builtin.template:
         src: "subapp.cfg.j2"
         dest: "{{ vault_subapp_vars.files.subapp.path }}"
         owner: "{{ vault_subapp_vars.files.subapp.owner }}"
         group: "{{ vault_subapp_vars.files.subapp.group }}"
-        mode: "{{ vault_subapp_vars.files.subapp.mode }}"
+        mode: "{{ vault_subapp_vars.files.subapp.mode | default('0400') }}"
       with_items: "{{ vault_subapp_vars.hosts[ansible_hostname] | default(vault_subapp_vars.hosts['sdvlzasubappmoleculetest']) }}"
 
 - name: Quarantaeneverzeichnis & Lockverzeichnis anlegen
@@ -147,29 +180,70 @@
   loop:
     - "/home/{{ vault_subapp_user }}/.subapp/quarantine"
     - "/home/{{ vault_subapp_user }}/.subapp/lockdir"
+    - "/home/{{ vault_disapp_user }}/.disapp/lockdir"
+
+- name: Symlinks zu alten Loglocations bereinigen
+  ansible.builtin.file:
+    path: "/home/{{ vault_subapp_user }}/.subapp/{{ ansible_hostname }}"
+    state: absent
 
-# Softlinks für SubApp-Konfigurationen und nach /var/log setzen
-- name: Softlinks für SubApp-Konfigurationen und nach /var/log setzen
+- name: Symlinks für SubApp-Konfigurationen und nach /var/log setzen
   ansible.builtin.file:
     src: "{{ item.src }}"
     dest: "{{ item.dest }}"
-    state: link
-    owner: "{{ vault_subapp_user }}"
-    group: "{{ vault_subapp_group }}"
-  with_items:
+    state: "{{ item.state | default('link') }}"
+    mode: "{{ item.mode | default(omit) }}"
+    owner: "{{ item.owner }}"
+    group: "{{ item.group }}"
+  loop:
+    # DisApp
+    - src: "{{ vault_subapp_vars.files.disapp.path }}"
+      dest: "/home/{{ vault_disapp_user }}/.disapp.cfg"
+      owner: "{{ vault_disapp_user }}"
+      group: "{{ vault_disapp_group }}"
+    - src: "{{ paths.log_disapp.mountpoint }}"
+      dest: "/home/{{ vault_disapp_user }}/.disapp/disapp_logs"
+      owner: "{{ vault_disapp_user }}"
+      group: "{{ vault_disapp_group }}"
+    # SubApp
     - src: "{{ vault_subapp_vars.files.subapp.path }}"
       dest: "/home/{{ vault_subapp_user }}/.subapp.cfg"
-    - src: "/var/log/subapp/{{ ansible_hostname }}"
-      dest: "/home/{{ vault_subapp_user }}/.subapp/{{ ansible_hostname }}"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
+    - src: "{{ paths.log_subapp.mountpoint }}"
+      dest: "/home/{{ vault_subapp_user }}/.subapp/subapp_logs"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
+    # Webservice
+    - src: "{{ paths.log_subapp_ws.mountpoint }}"
+      dest: "/home/{{ vault_subapp_user }}/.subapp/subapp_ws_logs"
+      owner: "{{ vault_subapp_user }}"
+      group: "{{ vault_subapp_group }}"
 
-# Bash-Completion einspielen
-- name: Konfiguration für Bash-Completion einspielen
-  ansible.builtin.copy:
-    src: "home/{{ vault_subapp_user }}/.bash_completion"
-    dest: "/home/{{ vault_subapp_user }}/.bash_completion"
-    owner: "{{ vault_subapp_user }}"
-    group: "{{ vault_subapp_group }}"
-    mode: "0644"
+# Bash-Completion funktioniert ab 2020.2 anders, s. Abschnitt AUTOCOMPLETION in perldoc bin/subapp_rosetta.pl und bin/disapp_rosetta.pl
+# https://ansible-lint.readthedocs.io/en/latest/usage/#false-positives-skipping-rules
+- name: Bash-Completion aktivieren    # noqa command-instead-of-shell
+  ansible.builtin.shell:
+    chdir: "/usr/local/bin/"
+    cmd: "{{ item }}"
+    executable: "/usr/bin/bash"       # Yup, this REALLY needs a Bash, so we HAVE to use ansible.builtin.shell, so don't show linter errors
+  loop:
+    - "complete -C subapp_rosetta.pl subapp_rosetta.pl"
+    - "complete -C disapp_rosetta.pl disapp_rosetta.pl"
+  changed_when: false
+
+- name: alte Bash-Completion entfernen
+  ansible.builtin.file:
+    path: "/home/{{ vault_subapp_user }}/.bash_completion"
+    state: absent
+
+# - name: Konfiguration für Bash-Completion einspielen
+#   ansible.builtin.copy:
+#     src: "home/{{ vault_subapp_user }}/.bash_completion"
+#     dest: "/home/{{ vault_subapp_user }}/.bash_completion"
+#     owner: "{{ vault_subapp_user }}"
+#     group: "{{ vault_subapp_group }}"
+#     mode: "0644"
 
 - name: alte Stichproben entfernen
   block:
@@ -190,11 +264,13 @@
         - "/etc/systemd/user/stichprobe-daily-report.timer"
         - "/etc/systemd/user/stichprobe-daily-report.service"
         - "/home/{{ vault_subapp_user }}/.subapp/stichprobe.ini"
+        - "/etc/systemd/user/check_ie_sample.timer"
+        - "/etc/systemd/user/check_ie_sample.service"
 
-- name: check_ie_sample Timer
+- name: deploy check_ie_sample Service
   ansible.builtin.template:
-    src: "check_ie_sample.timer.j2"
-    dest: "/etc/systemd/user/check_ie_sample.timer"
+    src: "usr/local/lib/systemd/system/check_ie_sample.service.j2"
+    dest: "/usr/local/lib/systemd/system/check_ie_sample.service"
     owner: "root"
     group: "root"
     mode: "0644"
@@ -202,10 +278,10 @@
   when: ansible_hostname == item.key
   no_log: true
 
-- name: check_ie_sample Service
+- name: deploy check_ie_sample Timer
   ansible.builtin.template:
-    src: "check_ie_sample.service.j2"
-    dest: "/etc/systemd/user/check_ie_sample.service"
+    src: "usr/local/lib/systemd/system/check_ie_sample.timer.j2"
+    dest: "/usr/local/lib/systemd/system/check_ie_sample.timer"
     owner: "root"
     group: "root"
     mode: "0644"
@@ -213,22 +289,21 @@
   when: ansible_hostname == item.key
   no_log: true
 
-# - name: enable check_ie_sample Service
-#   ansible.builtin.command: "systemctl enable /etc/systemd/user/check_ie_sample.service"
-#   loop: "{{ lookup('dict', vault_stichprobe_hosts) }}"
-#   when: ansible_hostname == item.key
-#   no_log: true
-#
-# - name: enable check_ie_sample Timers
-#   ansible.builtin.command: "systemctl enable /etc/systemd/user/check_ie_sample.timer"
-#   loop: "{{ lookup('dict', vault_stichprobe_hosts) }}"
-#   when: ansible_hostname == item.key
-#   no_log: true
-#
-# - name: restart check_ie_sample Timers
-#   ansible.builtin.systemd:
-#     name: "check_ie_sample.timer"
-#     state: restarted
-#   loop: "{{ lookup('dict', vault_stichprobe_hosts) }}"
-#   when: ansible_hostname == item.key
-#   ignore_errors: true
+- name: enable check_ie_sample Service
+  ansible.builtin.systemd:
+    unit: "check_ie_sample.service"
+    enabled: true
+    daemon_reload: true
+  loop: "{{ lookup('dict', vault_check_ie_sample_hosts) }}"
+  when: ansible_hostname == item.key
+  no_log: true
+
+- name: enable & start check_ie_sample Timer
+  ansible.builtin.systemd:
+    unit: "check_ie_sample.timer"
+    enabled: true
+    state: restarted
+    daemon_reload: true
+  loop: "{{ lookup('dict', vault_check_ie_sample_hosts) }}"
+  when: ansible_hostname == item.key
+  no_log: true
diff --git a/tasks/install_ta_tools.yml b/tasks/install_ta_tools.yml
index 5a50886ca8bb2a8cb081e2c51b5ce81aa887a4f4..f156e941272666c62fe06301213b2ae64897a933 100644
--- a/tasks/install_ta_tools.yml
+++ b/tasks/install_ta_tools.yml
@@ -1,30 +1,53 @@
 ---
 - name: install Git (prerequisite for ansible.builtin.git)
   ansible.builtin.apt:
-    name: "git"
+    name: [
+      "git",
+      "libdist-zilla-perl",
+    ]
     state: latest
 
 - name: checkout ta-tools repo
   ansible.builtin.git:
     repo: "https://git.slub-dresden.de/digital-preservation/tools-for-technical-analysts.git"
     dest: "/tmp/tools-for-technical-analysts/"
+  register: ta_tools_git
 
-# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/copy_module.html
-# 'If path is a directory, it is copied recursively. In this case, if path ends
-# with "/", only inside contents of that directory are copied to destination.'
+# Run `dzil listdeps` to get an updated list of dependencies.
+# Commented dependencies are not available as Debian packages and will be
+# installed when running `dzil install` in the next task.
+- name: install dependencies for ta-tools
+  ansible.builtin.apt:
+    name: [
+      # App::Cmd::Setup
+      "libdata-printer-perl",
+      "libdatetime-perl",
+      "libdatetime-format-dateparse-perl",
+      # ExtUtils::MakeMaker
+      # IO::Zlib
+      # LWP::UserAgent
+      "libpath-tiny-perl",
+      "libregexp-optimizer-perl",
+      "libsoap-lite-perl",
+      "libtext-csv-perl",
+      "libyaml-perl",
+    ]
+
+# This is THE main purpose of this task file, so we won't run this as a handler
+# but set "noqa: no-handler" instead.
 - name: install ta-tools
-  ansible.builtin.copy:
-    src: "/tmp/tools-for-technical-analysts/{{ item.src }}"
-    dest: "{{ item.dest }}"
-    mode: "0644"
-    remote_src: true
-  loop:
-    - src: "bin/"
-      dest: "/usr/local/bin/"
-    - src: "lib/"
-      dest: "/usr/local/perl/"
+  ansible.builtin.command:
+    cmd: "dzil install"
+    chdir: "/tmp/tools-for-technical-analysts/"
+  when: ta_tools_git.changed    # noqa: no-handler
 
-- name: set execution bit for ta-tools script
+- name: create directories needed by ta-tools
   ansible.builtin.file:
-    path: "/usr/local/bin/ta-tool.pl"
+    path: "/home/{{ vault_subapp_user }}/{{ item }}/"
+    state: directory
     mode: "0755"
+    owner: "{{ vault_subapp_user }}"
+    group: "{{ vault_subapp_group }}"
+  loop:
+    - ".cache"
+    - ".config"
diff --git a/tasks/main.yml b/tasks/main.yml
index a6ad0f2010cdedebd46f9798e87ad237df82d53f..1b4afcafc711bf22e1efef4f8385bcdb41491029 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -53,6 +53,10 @@
   ansible.builtin.import_tasks: "configure_processing_user.yml"
   tags: [users, bash]
 
+- name: Bash konfigurieren
+  ansible.builtin.import_tasks: "configure_bash.yml"
+  tags: [users, bash]
+
 - name: Check_MK Plugins installieren
   ansible.builtin.import_tasks: "install_checkmk_plugins.yml"
   tags: [monitoring, checkmk]
diff --git a/templates/check_ie_sample.service.j2 b/templates/check_ie_sample.service.j2
deleted file mode 100644
index c5d795ab80b980cdf6ed0c183a411a79cb02321b..0000000000000000000000000000000000000000
--- a/templates/check_ie_sample.service.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=service: daily report from stichprobe, institute {{ item.value.institute_name }}
-Documentation=perldoc /usr/local/bin/check_ie_sample.pl
-
-[Service]
-Type=oneshot
-Restart=no
-ExecStart=/usr/bin/perl -I /usr/local/perl /usr/local/bin/check_ie_sample.pl --institute {{ item.value.institute_name }} --email langzeitarchiv@slub-dresden.de --host {{ item.value.rosetta_host }} --user {{ item.value.user }} --password {{ item.value.password }} --sampling_factor 0.001
-User={{ vault_subapp_user }}
-Group={{ vault_subapp_group }}
-
-[Install]
-WantedBy=default.target
diff --git a/templates/disapp.cfg.j2 b/templates/disapp.cfg.j2
new file mode 100644
index 0000000000000000000000000000000000000000..be83db2c101e4aa23705746458c14247b2a3b92b
--- /dev/null
+++ b/templates/disapp.cfg.j2
@@ -0,0 +1,79 @@
+###########################
+### OPTIONAL PARAMETERS ###
+###########################
+
+
+
+### optional processing and log settings
+
+# Absolute path to a PID file
+pid_file:/run/disapp/disapp_bagit.pid
+# Log4perl log level
+logger_min_level:debug
+
+
+
+############################
+### MANDATORY PARAMETERS ###
+############################
+
+
+
+### internal working directories
+
+# Absolute path to the directory that is used for controlling, processing and storing exports from Rosetta.
+# Restore requests need to be put below directory_export/consumer_dir/.
+# Restored IEs can be found in directory_export/rosetta_export/.
+directory_export:/mnt/{{ ansible_hostname }}_access/
+# Absolute path to the directory which is used for placing lockfiles into for SIPs when their processing commences.
+directory_lock:/home/{{ vault_disapp_user }}/.disapp/lockdir/
+
+
+
+### user/group settings
+
+# Name of the Linux user that owns the subapp's access directory.
+owner_user_export_dir:{{ vault_disapp_user }}
+# Name of the Linux group that owns the subapp's access directory.
+owner_group_export_dir:{{ vault_disapp_group }}
+
+
+
+### Rosetta settings
+
+# FQDN of a) the host that the Rosetta application with the DEP role is running on or b) the load balancer that is in front of a Rosetta application cluster
+rosetta_host:{{ vault_subapp_vars.hosts[ansible_hostname].RosettaHost | default("ROSETTA_HOSTNAME_TEMPLATE") }}
+# FQDN of the host that the PDS authentication server is running on
+rosetta_pdshost:{{ vault_subapp_vars.hosts[ansible_hostname].PdsHost | default("PDS_HOSTNAME_TEMPLATE") }}
+# name of the institution in Rosetta that the subapp will ingest its SIPs into
+rosetta_institute:{{ vault_subapp_vars.hosts[ansible_hostname].Institute | default("INSTITUTE_NAME_TEMPLATE") }}
+# Material Flow ID of the Material Flow that will be used for processing SIPs in Rosetta
+rosetta_materialflowId:{{ vault_subapp_vars.hosts[ansible_hostname].MaterialFlowID | default("MATERIAL_FLOW_ID_TEMPLATE") }}
+# username of the user that the subapp will use for authentication against PDS/Rosetta
+rosetta_user:{{ vault_subapp_vars.hosts[ansible_hostname].User | default("SUBMISSION_APPLICATION_USER_TEMPLATE") }}
+# password of that user
+rosetta_password:{{ vault_subapp_vars.hosts[ansible_hostname].Rosetta_Password }}
+
+
+
+### processing configuration
+
+# Workflow name as agreed upon in the contract between producer and archive.
+fullname_workflow:{{ vault_subapp_vars.hosts[ansible_hostname].fullname_workflow | default("WORKFLOW_NAME_TEMPLATE") }}
+
+
+
+### email notification configuration
+
+# notification email address for consumers (NOTE(review): value below reads hosts[...].logger_producer_email — confirm this is intended and not a copy-paste slip)
+logger_consumer_email:{{ vault_subapp_vars.hosts[ansible_hostname].logger_producer_email | default("LOGGER_PRODUCER_EMAIL_TEMPLATE") }}
+# notification email address for archive staff (low level error information)
+logger_staff_email:{{ vault_subapp_vars.hosts[ansible_hostname].logger_staff_email | default("LOGGER_STAFF_EMAIL_TEMPLATE") }}
+
+
+
+### database configuration
+
+# Absolute path to SQLite database file for storing and loading message queues and SIP states
+# Hints: using /tmp is not allowed by SQLite, furthermore security requires the parent directory to be set to at least '750' (drwxr-x---)
+database_file:/home/{{ vault_disapp_user }}/.disapp/disapp.db
diff --git a/templates/etc/systemd/user/disapp.service.j2 b/templates/etc/systemd/user/disapp.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e2ac5a511651a5ff59ec033ab1dfd23f08021912
--- /dev/null
+++ b/templates/etc/systemd/user/disapp.service.j2
@@ -0,0 +1,65 @@
+[Unit]
+Description=SLUBArchiv Bagit-based Dissemination Application
+Documentation=man:disapp(7)
+After=remote-fs.target
+
+[Service]
+Type=forking
+Restart=no
+Environment="PERL5LIB=/usr/local/perl/"
+ExecStartPre=/bin/bash -c '\
+    BLOCKFILE="/home/{{ vault_subapp_user }}/.disapp/BLOCKFILE"; \
+    if [[ -e "$BLOCKFILE" ]]; then \
+        echo "Startup of DisApp is blocked by $BLOCKFILE."; \
+        cat $BLOCKFILE; \
+        exit 1; \
+    fi'
+ExecStart=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/disapp_rosetta.pl \
+	--config-file {{ vault_subapp_vars.files.disapp.path }} \
+	--start
+ExecStop=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/disapp_rosetta.pl \
+	--config-file {{ vault_subapp_vars.files.disapp.path }} \
+	--stop
+User={{ vault_disapp_user }}
+Group={{ vault_disapp_group }}
+# EXAMPLE: TimeoutSec=600
+TimeoutSec=infinity
+# DO NOT REMOVE!!! (based on SubApp Issue #68)
+# Do not kill any processes, to allow a safe shutdown with a valid DisApp database.
+KillMode=none
+
+### Stability features
+# DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
+# documented at "man (5) systemd.service" and
+# https://www.freedesktop.org/software/systemd/man/systemd.service.html
+#OOMPolicy=stop
+# documented at "man (5) systemd.exec" and
+# https://www.freedesktop.org/software/systemd/man/systemd.exec.html
+OOMScoreAdjust=-900
+
+### Security features
+# documented at "man (5) systemd.exec" and
+# https://www.freedesktop.org/software/systemd/man/systemd.exec.html
+# DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
+# KEEP DEACTIVATED IF YOU WANT TO SEND EMAILS! EXIM DOESN'T WORK WITH
+# ANY OF THESE SETTINGS IN PLACE!
+#ProtectSystem=full
+#ProtectHostname=true
+#ProtectClock=true
+#ProtectKernelTunables=true
+#ProtectKernelModules=true
+#ProtectKernelLogs=true
+#ProtectControlGroups=true
+#LockPersonality=true
+##MemoryDenyWriteExecute=true
+#RestrictRealtime=true
+#RestrictSUIDSGID=true
+## RemoveIPC=true
+## PrivateMounts=true
+## MountFlags=
+## SystemCallFilter is a Whitelist!!!
+#SystemCallFilter=@aio,@basic-io,@debug,@file-system,@network-io
+#SystemCallErrorNumber=1337
+
+[Install]
+WantedBy=multi-user.target
diff --git a/templates/etc/systemd/user/subapp.service.j2 b/templates/etc/systemd/user/subapp.service.j2
index 818d1a15cc523ac68b1534be130f7419d4c682cb..984963fe9bec87b621a9d47198ddfb4394cc8b12 100644
--- a/templates/etc/systemd/user/subapp.service.j2
+++ b/templates/etc/systemd/user/subapp.service.j2
@@ -14,26 +14,12 @@ ExecStartPre=/bin/bash -c '\
         cat $BLOCKFILE; \
         exit 1; \
     fi'
-ExecStart=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_bagit.pl --config-file {{ vault_subapp_vars.files.subapp.path }} --start
-ExecStop=/bin/bash -c '\
-    PID=$(cat /run/subapp/subapp_bagit.pid); \
-    if [[ ! "$PID" =~ ^[0-9]+$ ]]; then \
-        echo "something broke, no valid PID for submission application daemon"; \
-        exit 1; \
-    fi; \
-    echo "sending --stop to submission application daemon" | systemd-cat -p info; \
-    /usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_bagit.pl --config-file {{ vault_subapp_vars.files.subapp.path }} --stop; \
-    echo " waiting for submission application daemon to finish" | systemd-cat -p info; \
-    while true; do \
-        ps -p $PID > /dev/null; \
-        if [[ $? != 0 ]]; then \
-            break; \
-        fi; \
-        echo "submission application daemon is still running, waiting another 5 seconds" | systemd-cat -p warning; \
-        sleep 5; \
-    done; \
-    echo "submission application daemon stopped" | systemd-cat -p info'
-
+ExecStart=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_rosetta.pl \
+	--config-file {{ vault_subapp_vars.files.subapp.path }} \
+	--start
+ExecStop=/usr/bin/perl -I /usr/local/perl/ /usr/local/bin/subapp_rosetta.pl \
+	--config-file {{ vault_subapp_vars.files.subapp.path }} \
+	--stop
 User={{ vault_subapp_user }}
 Group={{ vault_subapp_group }}
 # EXAMPLE: TimeoutSec=600
@@ -43,10 +29,9 @@ TimeoutSec=infinity
 KillMode=none
 
 ### Stability features
-# DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
 # documented at "man (5) systemd.service" and
 # https://www.freedesktop.org/software/systemd/man/systemd.service.html
-#OOMPolicy=stop
+OOMPolicy=stop
 # documented at "man (5) systemd.exec" and
 # https://www.freedesktop.org/software/systemd/man/systemd.exec.html
 OOMScoreAdjust=-900
@@ -54,8 +39,9 @@ OOMScoreAdjust=-900
 ### Security features
 # documented at "man (5) systemd.exec" and
 # https://www.freedesktop.org/software/systemd/man/systemd.exec.html
-# DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
-#ProtectSystem=strict
+# KEEP DEACTIVATED IF YOU WANT TO SEND EMAILS! EXIM DOESN'T WORK WITH
+# ANY OF THESE SETTINGS IN PLACE!
+#ProtectSystem=full
 ## ProtectHome=read-only
 #ProtectHostname=true
 #ProtectClock=true
@@ -64,7 +50,7 @@ OOMScoreAdjust=-900
 #ProtectKernelLogs=true
 #ProtectControlGroups=true
 #LockPersonality=true
-#MemoryDenyWriteExecute=true
+##MemoryDenyWriteExecute=true
 #RestrictRealtime=true
 #RestrictSUIDSGID=true
 ## RemoveIPC=true
diff --git a/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2 b/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2
index 46d5609b5108b069d033c8b639fed385d3c16204..bd5a1c12183ce467a3a89975b1c6e51087080151 100644
--- a/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2
+++ b/templates/etc/systemd/user/webservice_status_SLUBarchiv.service.j2
@@ -1,6 +1,6 @@
 [Unit]
 Description=Webservice "Check SIP archival status"
-After=network.target
+After=remote-fs.target
 
 [Service]
 ExecStart=/usr/bin/nohup /usr/bin/perl -I /usr/local/perl /usr/local/bin/webservice_status_SLUBarchiv.pl --config-file {{ vault_subapp_vars.files.subapp.path }} &
@@ -13,8 +13,10 @@ User={{ vault_subapp_user }}
 ### Security features
 # documented at https://www.freedesktop.org/software/systemd/man/systemd.exec.html
 # DEACTIVATED FOR DEBIAN 10, AS SYSTEMD DOESN'T SEEM TO SUPPORT THEM YET.
-#ProtectSystem=strict
-#ProtectHome=read-only
+# KEEP DEACTIVATED IF YOU WANT TO SEND EMAILS! EXIM DOESN'T WORK WITH
+# ANY OF THESE SETTINGS IN PLACE!
+#ProtectSystem=full
+##ProtectHome=read-only
 #ProtectHostname=true
 #ProtectClock=true
 #ProtectKernelTunables=true
@@ -22,7 +24,7 @@ User={{ vault_subapp_user }}
 #ProtectKernelLogs=true
 #ProtectControlGroups=true
 #LockPersonality=true
-#MemoryDenyWriteExecute=true
+##MemoryDenyWriteExecute=true
 #RestrictRealtime=true
 #RestrictSUIDSGID=true
 ## RemoveIPC=true
diff --git a/templates/subapp.cfg.j2 b/templates/subapp.cfg.j2
index 9aa37545def3d58f08f10b2e0dd750f763065974..70e6909b1698a0be715bcc5ab5a9d3a840d47ad3 100644
--- a/templates/subapp.cfg.j2
+++ b/templates/subapp.cfg.j2
@@ -9,7 +9,7 @@
 # Absolute path to a file that contains a CSV list of SIPs that should be ignored by the subapp. One entry per line. You can create the file and leave it empty if you like; subapp will then act as if there was no blacklist at all and process all the SIPs.
 blacklist_sip_file:/home/{{ vault_subapp_user }}/.subapp/usa_blacklist_file.csv
 # Absolute path to a PID file
-pid_file:/run/subapp/subapp_bagit.pid
+pid_file:/run/subapp/subapp_rosetta.pid
 # Log4perl log level
 logger_min_level:debug
 # Blocking AIP updates
@@ -25,10 +25,6 @@ ingest_only:{{ vault_subapp_vars.hosts[ansible_hostname].ingest_only | default("
 
 ### internal working directories
 
-# Absolute path to the directory that is used for controlling, processing and storing exports from Rosetta.
-# Restore requests need to be put below directory_export/consumer_dir/.
-# Restored IEs can be found in directory_export/rosetta_export/.
-directory_export:/mnt/{{ ansible_hostname }}_access/
 # Absolute path to the directory which is used for placing lockfiles into for SIPs when their processing commences.
 directory_lock:/home/{{ vault_subapp_user }}/.subapp/lockdir/
 # Absolute path to the directory which contains symlinks to the SIPs that encountered errors during the processing and are put into quarantine. These SIPs will be ignored until the error is resolved.
@@ -50,8 +46,6 @@ owner_user_ingest_dir:{{ vault_subapp_user }}
 owner_group_import_dir:import
 # Name of the Linux group that owns the subapp's ingest directory.
 owner_group_ingest_dir:{{ vault_subapp_group }}
-# Name of the Linux group that owns the subapp's access directory.
-owner_group_export_dir:access
 
 
 
diff --git a/templates/usr/lib/check_mk_agent/plugins/check_subapp_quarantine.sh.j2 b/templates/usr/lib/check_mk_agent/plugins/check_subapp_quarantine.sh.j2
new file mode 100755
index 0000000000000000000000000000000000000000..86e113d75728a0c9746d42167bb6960c336a678c
--- /dev/null
+++ b/templates/usr/lib/check_mk_agent/plugins/check_subapp_quarantine.sh.j2
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+WARN_THRESHOLD=10
+ERROR_THRESHOLD=50
+
+QUARANTINE="$( ls /home/{{ vault_subapp_user }}/.subapp/quarantine/ | wc -l )"
+
+# Status Servicename Metriken Details
+echo "P \"SubApp SIPs Quarantine\" quara=${QUARANTINE};${WARN_THRESHOLD};${ERROR_THRESHOLD} ${QUARANTINE} SIPs in Quarantine on ${HOSTNAME}."
+
diff --git a/templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2 b/templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..3a8de19bbfd521e5cc53099955d19f97568694f4
--- /dev/null
+++ b/templates/usr/lib/tmpfiles.d/disapp-pid-dir.conf.j2
@@ -0,0 +1 @@
+d /run/disapp 0750 {{ vault_disapp_user }} {{ vault_disapp_group }} -
diff --git a/templates/usr/local/lib/systemd/system/check_ie_sample.service.j2 b/templates/usr/local/lib/systemd/system/check_ie_sample.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..9371162907b6d5178d33a68916d2c7f2df143f4a
--- /dev/null
+++ b/templates/usr/local/lib/systemd/system/check_ie_sample.service.j2
@@ -0,0 +1,29 @@
+[Unit]
+Description=service: daily report for sampling {{ item.value.institute_name }} workflows
+Documentation=perldoc /usr/local/bin/check_ie_sample.pl
+
+[Service]
+Type=oneshot
+Restart=no
+# Sampling factor = 1/100/365
+#ExecStart=/usr/bin/perl -I /usr/local/perl /usr/local/bin/check_ie_sample.pl \
+#	--institute {{ item.value.institute_name }} \
+#	--email langzeitarchiv@slub-dresden.de \
+#	--host {{ item.value.rosetta_host }}.slub-dresden.de \
+#	--user {{ item.value.user }} \
+#	--password {{ item.value.password }} \
+#	--sampling_factor 0.0000274 \
+#	--dead_reference {{ item.value.institute_dead_ref_process_id }}
+ExecStart=/usr/bin/perl -I /usr/local/perl /usr/local/bin/check_ie_sample.pl \
+	--institute {{ item.value.institute_name }} \
+	--email langzeitarchiv@slub-dresden.de \
+	--host {{ item.value.rosetta_host }}.slub-dresden.de \
+	--user {{ item.value.user }} \
+	--password {{ item.value.password }} \
+	--sampling_factor 0.0000274 \
+	--fixity_check {{ item.value.institute_fixity_check_process_id }}
+User={{ vault_subapp_user }}
+Group={{ vault_subapp_group }}
+
+[Install]
+WantedBy=default.target
diff --git a/templates/check_ie_sample.timer.j2 b/templates/usr/local/lib/systemd/system/check_ie_sample.timer.j2
similarity index 72%
rename from templates/check_ie_sample.timer.j2
rename to templates/usr/local/lib/systemd/system/check_ie_sample.timer.j2
index 94372a6f7320a7ce134f0683972789db1c28a4e8..0a42a5580bcc13f11ccd4c1a62543370654657e0 100644
--- a/templates/check_ie_sample.timer.j2
+++ b/templates/usr/local/lib/systemd/system/check_ie_sample.timer.j2
@@ -1,18 +1,16 @@
 [Unit]
-Description=timer: daily report for stichprobe {{ item.value.institute_name }} workflows
+Description=timer: daily report for sampling {{ item.value.institute_name }} workflows
 Documentation=perldoc /usr/local/bin/check_ie_sample.pl
 
 [Timer]
-OnCalendar=monthly
+OnCalendar=daily
 #RandomizedDelaySec=14400
-# run every x minutes/hours/days
-#OnUnitActiveSec={{ item.value.timer_interval }}
 #Wake system from suspend mode
 WakeSystem=true
 #When activated, it triggers the service immediately if it missed the last start time, for example due to the system being powered off
 Persistent=true
 #Unit to activate when the timer elapses. (default is set to the same name as the timer unit, except for the suffix)
-Unit=stichprobe-daily-report.service
+Unit=check_ie_sample.service
 
 [Install]
 #is requires to activate the timer permanently
diff --git a/templates/etc/systemd/user/move_old_logs.service.j2 b/templates/usr/local/lib/systemd/system/move_old_logs_@.service.j2
similarity index 79%
rename from templates/etc/systemd/user/move_old_logs.service.j2
rename to templates/usr/local/lib/systemd/system/move_old_logs_@.service.j2
index 86f890db576852f0f0bdc38c772265bbe2f6cccb..8bf37860c0ceb35252a15255e963b940a36c6d49 100644
--- a/templates/etc/systemd/user/move_old_logs.service.j2
+++ b/templates/usr/local/lib/systemd/system/move_old_logs_@.service.j2
@@ -1,12 +1,12 @@
 [Unit]
-Description=move_old_logs.sh
+Description=Daemon to move old logfiles from previous year to archive (%i)
 After=remote-fs.target
 
 [Service]
 Type=simple
-ExecStart=/usr/local/bin/move_old_logs.sh
-User={{ vault_subapp_user }}
-Group={{ vault_subapp_group }}
+ExecStart=/usr/local/bin/move_old_logs.sh %i
+User=root
+Group=root
 
 ### Security features
 # documented at https://www.freedesktop.org/software/systemd/man/systemd.exec.html
@@ -21,7 +21,6 @@ ProtectControlGroups=true
 LockPersonality=true
 MemoryDenyWriteExecute=true
 RestrictRealtime=true
-RestrictSUIDSGID=true
 ## RemoveIPC=true
 ## PrivateMounts=true
 ## MountFlags=
diff --git a/vars/nfs-mounts.vault.example b/vars/nfs-mounts.vault.example
index 1b5c106cbfd9d9be1c275a94f17fe08df0e4c59e..eb6f7e170e92e9e361ca6f6d5a9012d9a82aaae7 100644
--- a/vars/nfs-mounts.vault.example
+++ b/vars/nfs-mounts.vault.example
@@ -19,7 +19,7 @@ nfs_opts:
 paths:
   log:
     nfs_share: "/vol/linux_server_logs/{{ ansible_hostname }}"
-    mountpoint: "/var/log/subapp/{{ ansible_hostname }}"
+    mountpoint: "/var/log/subapp/"
     owner: "USERNAME_HERE"
     group: "GROUPNAME_HERE"
     mode: "0xxx"
diff --git a/vars/stichprobe.vault.example b/vars/stichprobe.vault.example
index e9876faaa8aed63925c9f9b4895912cfe369b89b..1503a33d6b531cf8aac47beddc06ea8038e5dc47 100644
--- a/vars/stichprobe.vault.example
+++ b/vars/stichprobe.vault.example
@@ -13,16 +13,10 @@ vault_stichprobe_hosts:
 #  <name-of-subapp-server>:
 #    host: "<Rosetta-Webservice-API-Hostname>"
 #    user: "<Rosetta-Submission-Agent-Username>"
-#    password: !vault |
-#      $ANSIBLE_VAULT;1.1;AES256
-#      11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
+#    password: "Sw0rdF!5h"
 #    email: "recipients@example.com"
 #    institute_name: "<name-of-institution-as-given-in-Rosetta>"
 #    institute_code: "<code-of-institution-as-given-in-Rosetta>"
 #    institute_dead_ref_process_id: "<Rosetta-Process-ID-for Dead-Reference-Identification-Job>"
 #    institute_fixity_check_process_id: "<Rosetta-Process-ID-for-Fixity-Check-Job>"
 #    institute_department: "<space-separated-list-of-Rosetta-Departments-like-CONSORTIUM.INS.DEPT>"
-#    timer_interval: "<timer-interval-in-systemd-time-syntax>"
-#    # timer_interval: "{{ ( (24 * 60 * 60) / (<Number_of-IEs> / 36500) ) + 1 | round(0,'floor') | int }} seconds"
-#    # timer_interval: "{{ ( (24h * 60min * 60sec) / (<Number-of-IEs> / (365days / 100)) ) + 1 | round(0,'floor') | int }} seconds"           # Timer Interval explained
-#      # documented at https://www.freedesktop.org/software/systemd/man/systemd.time.html