#!/bin/sh

set -e
set -u
# DEBUG
set -x

# TODO there is a conflict between two shared vars:
#   1. from the original docker compose devicehub-teal
#   2. from the new docker compose that integrates all dpp services
wait_for_dpp_shared() {
    while true; do
        # especially ensure VERAMO_API_CRED_FILE is not empty;
        #   it takes some time for data to arrive
        OPERATOR_TOKEN_FILE='operator-token.txt'
        if [ -f "/shared/${OPERATOR_TOKEN_FILE}" ] && \
               [ -f "/shared/create_user_operator_finished" ]; then
            sleep 5
            echo "Files ready to process."
            break
        else
            echo "Waiting for file in shared: ${OPERATOR_TOKEN_FILE}"
            sleep 5
        fi
    done
}

# 3. Generate an environment .env file.
# TODO load it via shared
gen_env_vars() {
    INIT_ORG="${INIT_ORG:-example-org}"
    INIT_USER="${INIT_USER:-user@example.org}"
    INIT_PASSWD="${INIT_PASSWD:-1234}"
    ADMIN='True'
    PREDEFINED_TOKEN="${PREDEFINED_TOKEN:-}"

    # specific dpp env vars
    if [ "${DPP:-}" = 'true' ]; then
        # fill env vars in this docker entrypoint
        wait_for_dpp_shared
        export API_DLT='http://api_connector:3010'
        export API_DLT_TOKEN="$(cat "/shared/${OPERATOR_TOKEN_FILE}")"
        export API_RESOLVER='http://id_index_api:3012'
        # TODO hardcoded
        export ID_FEDERATED='DH1'
        # propagate to .env
        dpp_env_vars="$(cat <<END
API_DLT=${API_DLT}
API_DLT_TOKEN=${API_DLT_TOKEN}
API_RESOLVER=${API_RESOLVER}
ID_FEDERATED=${ID_FEDERATED}
END
)"
    fi

    # write the generated configuration for the django app
    cat > .env <<END
${dpp_env_vars:-}
END
}

# 12, 13, 14
# TODO restore this function's body (the DLT registration steps that
#   config_phase refers to as 12, 13 and 14); kept as a stub so the
#   call site below keeps working
config_dpp_part1() {
    true
}

run_demo() {
    if [ "${DPP:-}" = 'true' ]; then
        # TODO restore the demo data generation; only its output targets
        #   survive:
        #     ... > "${DATASET_FILE}"
        #     ... > 'example/snapshots/snapshot_workbench-script_verifiable-credential.json'
        true
    fi
    /usr/bin/time ./manage.py up_snapshots example/snapshots/ "${INIT_USER}"
}

config_phase() {
    # TODO review this flag file
    init_flagfile="${program_dir}/already_configured"
    if [ ! -f "${init_flagfile}" ]; then

        # non DL user (only for the inventory)
        ./manage.py add_institution "${INIT_ORG}"
        # TODO a single add_user error prevents the user from ever being added
        ./manage.py add_user "${INIT_ORG}" "${INIT_USER}" "${INIT_PASSWD}" \
            "${ADMIN}" "${PREDEFINED_TOKEN}"

        if [ "${DPP:-}" = 'true' ]; then
            # 12, 13, 14
            config_dpp_part1

            # cleanup other snapshots and copy dlt/dpp snapshots
            # TODO make this better
            rm example/snapshots/*
            cp example/dpp-snapshots/*.json example/snapshots/
        fi

        # 15. Add inventory snapshots for user "${INIT_USER}".
        if [ "${DEMO:-}" = 'true' ]; then
            run_demo
        fi

        # keep the next command as the last operation of this conditional
        touch "${init_flagfile}"
    fi
}
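# For reference, the first-boot bootstrap above boils down to the following
# manage.py calls (a sketch using the default values from gen_env_vars;
# adjust org, user, password and token for a real deployment):
#
#   ./manage.py add_institution example-org
#   ./manage.py add_user example-org user@example.org 1234 True ''
#   ./manage.py up_snapshots example/snapshots/ user@example.org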
-f "./manage.py" ]; then usage fi } deploy() { # TODO this is weird, find better workaround git config --global --add safe.directory "${program_dir}" export COMMIT=$(git log --format="%H %ad" --date=iso -n 1) if [ "${DEBUG:-}" = 'true' ]; then ./manage.py print_settings else echo "DOMAIN: ${DOMAIN}" fi # detect if existing deployment (TODO only works with sqlite) if [ -f "${program_dir}/db/db.sqlite3" ]; then echo "INFO: detected EXISTING deployment" ./manage.py migrate else # move the migrate thing in docker entrypoint # inspired by https://medium.com/analytics-vidhya/django-with-docker-and-docker-compose-python-part-2-8415976470cc echo "INFO detected NEW deployment" ./manage.py migrate config_phase fi } runserver() { PORT="${PORT:-8000}" if [ "${DEBUG:-}" = 'true' ]; then ./manage.py runserver 0.0.0.0:${PORT} else # TODO #./manage.py collectstatic true if [ "${EXPERIMENTAL:-}" ]; then # TODO # reloading on source code changing is a debugging future, maybe better then use debug # src https://stackoverflow.com/questions/12773763/gunicorn-autoreload-on-source-change/24893069#24893069 # gunicorn with 1 worker, with more than 1 worker this is not expected to work #gunicorn --access-logfile - --error-logfile - -b :${PORT} trustchain_idhub.wsgi:application true else ./manage.py runserver 0.0.0.0:${PORT} fi fi } main() { program_dir='/opt/devicehub-django' cd "${program_dir}" gen_env_vars deploy runserver } main "${@}"