#! /usr/bin/env bash

# CAUTION:
# This file was generated by generator-canonical-webteam@3.4.3
# and should usually not be edited directly.
#
# This file was generated by the "canonical-webteam" Yeoman generator
# https://npmjs.org/package/generator-canonical-webteam
#
# Update it to the latest version with:
#
# $ sudo npm install -g yo generator-canonical-webteam
# $ yo canonical-webteam:run

# Strict mode: exit on error (-e), error on unset variables (-u),
# and make pipelines fail if any stage fails (-o pipefail).
set -euo pipefail

# Usage text, printed by -h/--help and by invalid() when arguments are bad.
USAGE="How to use ./run v3.4.3
===

  $ ./run \\
    [-e|--env VAR_NAME=value]  # Declare an environment variable to use while running commands \\
    [-m|--node-module PATH]    # A path to a local node module to use instead of the installed dependencies \\
    [COMMAND]                  # Optionally provide a command to run

If no COMMAND is provided, \`serve\` will be run.

Commands
---

- serve [-p|--port PORT] [-d|--detach] [-f|--forward-port]: Run a development server
- watch [-s|--watch-site]: Run \`yarn run watch\` (for jekyll sites, watch for changes with \`--watch-site\`)
- build: Run \`yarn run build\`
- test: Run \`yarn run test\`
- test-python: Run \`yarn run test-python\`
- lint-python: Run \`yarn run lint-python\`
- stop: Stop any running containers
- exec [-r|--root] [-p|--expose-port PORT] <args>: Run a command in the development container (optionally exposing a port to the host)
- yarn [-r|--root] [-p|--expose-port PORT] <script-name>: Run a yarn script from package.json
- clean: Remove all images and containers, any installed dependencies and the .docker-project file
- clean-cache: Empty cache files, which are saved between projects (eg, yarn)
"

##
# Variable definitions
##

# Docker image used for the development container; a private registry
# prefix is applied when DOCKER_REGISTRY is set in the environment.
dev_image="canonicalwebteam/dev:1.7.0"
[ -z "${DOCKER_REGISTRY:-}" ] || dev_image="${DOCKER_REGISTRY}/${dev_image}"

# Interactivity options
[ -t 1 ] && tty="--tty --interactive" || tty=""           # Do we have a terminal?
[ -f .env ] && env_file="--env-file .env" || env_file=""  # Do we have an env file?
[ -f .env.local ] && env_file="${env_file} --env-file .env.local" || env_file=$env_file  # Do we have a local env file?

# Default environment settings (a .env file may override these below)
PORT=8000

# Import environment settings from the project's .env file, if present
if [ -f .env ]; then
    # shellcheck disable=SC1091
    . .env
fi

# Other variables
# Extra docker options for `serve`, taken from the environment if provided
run_serve_docker_opts="${CANONICAL_WEBTEAM_RUN_SERVE_DOCKER_OPTS:-}"
# Accumulators filled in by the global option-parsing loop below
module_volumes=""
env_vars=""

# Decide which md5 command to use: GNU coreutils ships md5sum, BSD/macOS ships md5.
# NB: test `command -v` directly — the previous `if $(command -v …)` form executed
# the (empty) substitution output as a command and only worked by accident.
if command -v md5sum > /dev/null 2>&1; then
    md5_command="md5sum"
elif command -v md5 > /dev/null 2>&1; then
    md5_command="md5"
else
    echo "No md5 tool available. Exiting."
    exit 1
fi

##
# Check docker is installed correctly
##
if ! command -v docker >/dev/null 2>&1; then
    echo "
    Error: Docker not installed
    ==
    Please install Docker before continuing:
    https://www.docker.com/products/docker
    "
    exit 1
fi
# If a "docker" group exists on this system, the current user must belong to it
# to be able to talk to the docker daemon without sudo.
if grep -q '^docker:' /etc/group && ! groups | grep -q '\bdocker\b'; then
    echo "
    Error: $(whoami) not in docker group
    ===
    Please add this user to the docker group, e.g. with:
    \$ newgrp docker
    "
    exit 1
fi

# Grab HTTP_PROXY settings from the host and translate them into
# docker (--env) and yarn (--proxy/--https-proxy) option strings.
http_proxy=""
yarn_proxy=""
if [ -n "${HTTP_PROXY:-}" ]; then
    http_proxy="--env HTTP_PROXY=${HTTP_PROXY} --env http_proxy=${HTTP_PROXY}"
    yarn_proxy="--proxy ${HTTP_PROXY}"
fi
if [ -n "${HTTPS_PROXY:-}" ]; then
    http_proxy+=" --env HTTPS_PROXY=${HTTPS_PROXY} --env https_proxy=${HTTPS_PROXY}"
    yarn_proxy+=" --https-proxy ${HTTPS_PROXY}"
fi

# Generate the project name: a directory-name + path-hash slug, persisted in
# .docker-project so the same project name is reused on subsequent runs.
if [[ -f ".docker-project" ]]; then
    project=$(cat .docker-project)
else
    directory="$(basename "$(pwd)")"
    hash=$(pwd | ${md5_command} | cut -c1-8)
    project="canonical-webteam-${directory}-${hash}"
    # Quote the expansion: directory names may contain spaces or glob characters
    echo "${project}" > .docker-project
fi

# Names for docker objects; all but the shared cache volume are scoped to
# this project so `clean` can find and remove them by name filter.

# Network name
network_name="${project}-net"

# Container names
db_container="${project}-db"
pip_container="${project}-pip"

# Volume names
cache_volume="${CANONICAL_WEBTEAM_CACHE_VOLUME:-canonical-webteam-cache}"
etc_volume="${project}-etc"
usr_local_volume="${project}-usr-local"
db_volume="${project}-db"

# Print an error message followed by a blank line and the usage text,
# then abort the script with a non-zero status.
invalid() {
    local reason="${1}"
    printf 'Error: %s\n\n' "${reason}"
    echo "$USAGE"
    exit 1
}

# Read optional global arguments (those that may appear before COMMAND)
while [[ -n "${1:-}" ]] && [[ "${1:0:1}" == "-" ]]; do
    key="$1"

    case $key in
        -e|--env)
            # Collect "--env NAME=value" pairs to forward to `docker run`
            if [ -z "${2:-}" ]; then invalid "Missing environment variables. Usage: --env XXXX=yyyy"; fi
            env_vars="${env_vars} --env ${2}"
            shift
        ;;
        -m|--node-module)
            # Mount a local module checkout over the installed dependency
            if [ -z "${2:-}" ]; then invalid "Missing module name. Usage: --node-module <path-to-module>."; fi
            # Ensure the mount point exists inside node_modules before docker binds it.
            # Quote the basename argument: module paths may contain spaces.
            module_dir="$(pwd)/node_modules/$(basename "${2}")"
            if [ ! -d "${module_dir}" ]; then
                mkdir -p "${module_dir}"
            fi
            module_volumes="${module_volumes} --volume=${2}:${module_dir}"
            shift
        ;;
        -h|--help) echo "$USAGE"; exit ;;
        -v|--version) echo "Generated from generator-canonical-webteam@3.4.3"; exit ;;
        *) invalid "Option '${key}' not recognised." ;;
    esac
    shift
done

start_django_db () {
    # For Django projects configured for postgres, start a postgres container
    # on an isolated project network, wait for it, and run migrations.
    if grep -q django.db.backends.postgresql_psycopg2 */settings.py 2> /dev/null; then
        # Create the isolated network, if it doesn't exist yet
        if ! docker network inspect ${network_name} &> /dev/null; then
            docker network create ${network_name}
        fi

        # Remove the database container when this script exits.
        # NB: this replaces any previously-installed EXIT trap.
        trap "kill_container ${db_container}" EXIT;
        if ! docker inspect -f {{.State.Running}} ${db_container} &>/dev/null; then
            docker run \
                --name ${db_container}       `# Name the container`  \
                --rm                         `# Remove the container once it's finished`  \
                --volume "${db_volume}":/var/lib/postgresql/data  `# Store dependencies in a docker volume`  \
                ${http_proxy}                `# Include HTTP proxy if needed`  \
                --network ${network_name}    `# Use an isolated network`  \
                --network-alias db           `# Call this container "db" on the network so it can be found`  \
                --detach                     `# Run in the background` \
                postgres                     `# Use the official postgres image`
        fi

        # Wait for postgres to accept connections (increasing backoff, max 4 tries)
        wait_time=0
        until docker exec ${db_container} pg_isready || [ $wait_time -eq 4 ]; do
            sleep $(( wait_time++ ))
        done

        # Provision the database. docker_run detects the project network itself,
        # so no extra options are needed here. (Previously "${network}" was passed,
        # but that variable may be unset at this point — an unbound-variable error
        # under `set -u` — and stale when set, as docker_run recomputes it.)
        run_as_user "" python3 manage.py migrate
    fi
}

# Force-remove the container with the given exact name, if one exists
# (running or stopped). Silently does nothing when no container matches.
kill_container () {
    local name="${1}"
    local existing_id

    existing_id=$(docker ps --all --quiet --filter "name=^/${name}$")
    if [ -n "${existing_id}" ]; then
        docker rm --force ${existing_id} > /dev/null
    fi
}

docker_run () {
    # Run a command in the dev image inside a throwaway container, mirroring
    # the current directory and wiring in project volumes, env files/vars,
    # proxy, network and terminal settings.
    #
    # $1:      extra `docker run` options (expanded unquoted deliberately, so
    #          multiple options word-split into separate arguments)
    # $2..$n:  the command (and its arguments) to run inside the container
    # Get options
    docker_run_options="${1}"; shift

    # Generate container name from command
    container_name="${project}-${@}"
    container_name="${container_name// /_}"  # Replace spaces with underscores
    container_name=$(echo ${container_name} | tr -dc '[:alnum:]_.-')  # Remove disallowed chars

    # Use network if it's been setup
    network=""
    if docker network inspect ${network_name} &> /dev/null; then
        network="--network ${network_name}"
    fi

    # Kill existing containers
    kill_container "${container_name}"

    # Environment info
    # Falls back to "unknown" when not inside a git repository
    commit_id=$(git rev-parse HEAD || echo "unknown")

    # Start the new container
    docker run  \
        --name ${container_name}     `# Name the container` \
        --rm                         `# Remove the container once it's finished`  \
        --volume "$(pwd):$(pwd)"     `# Mirror current directory inside container`  \
        --workdir "$(pwd)"           `# Set current directory to the image's work directory`  \
        --volume ${etc_volume}:/etc  `# Use etc with corresponding user added`  \
        --volume ${usr_local_volume}:/usr/local/       `# Bind local folder to volume`  \
        --volume ${cache_volume}:/home/shared/.cache/  `# Bind cache to volume` \
        --env COMMIT_ID=${commit_id} `# Pass through the commit ID` \
        ${network}                   `# Network settings, if needed` \
        ${env_file}                  `# Pass any files of environment variables to the container`  \
        ${env_vars}                  `# Pass explicit environment variables to the container`  \
        ${http_proxy}                `# Include HTTP proxy if needed`  \
        ${tty}                       `# Attach a pseudo-terminal, if relevant`  \
        ${docker_run_options}        `# Extra options`  \
        ${dev_image} $@              `# Run command in the image`
}

# Run a command in the dev container as the host user, so files created in
# the mounted project directory keep the host user's ownership.
# $1: extra docker options; remaining arguments: the command to run.
run_as_user () {
    local extra_options="${1}"
    shift

    create_etc_volume

    docker_run "--user $(id -u):$(id -g) ${extra_options}" $@
}

create_etc_volume() {
    # Create local user and group in the dev image
    # Seeds the project's /etc volume with a user and group matching the host
    # UID/GID, so containers started with `--user $(id -u):$(id -g)` can
    # resolve their identity inside the container.
    uid=$(id -u)
    gid=$(id -g)

    # `volume inspect` succeeds only when the volume already exists, so this
    # block runs once per project; the first `docker run --volume` creates it.
    if ! docker volume inspect -f " " ${etc_volume} 2> /dev/null; then
        etc_run="docker run --rm --volume ${etc_volume}:/etc ${dev_image}"
        # Add a group with the host GID, unless one is already present
        if ! ${etc_run} grep -P "${gid}:$" /etc/group; then
            ${etc_run} groupadd -g ${gid} app-user
        fi

        # Add a user with the host UID, unless one is already present
        if ! ${etc_run} grep -P "x:${uid}:" /etc/passwd; then
            ${etc_run} useradd -u ${uid} -g ${gid} app-user
        fi
    fi
}

update_dependencies() {
    # Install yarn, bower, bundler and pip dependencies, each only when its
    # manifest or installed tree has changed since the last run. Change
    # detection: hash the installed files plus the manifest, and compare with
    # the hash saved in .<tool>.<project>.hash from the previous install.
    # Make sure the etc volume has been created first
    create_etc_volume

    # Install yarn dependencies
    if [ -f package.json ]; then
        package_json_hash=$(${md5_command} package.json | cut -c1-8)
        if [ -d node_modules ]; then
            # Hash every installed file except the cache, NUL-delimited for safety
            yarn_dependencies_hash=$(find node_modules -type f ! -wholename 'node_modules/.cache/*' -print0 | sort -z | xargs -0 ${md5_command} | ${md5_command} | cut -c1-8)-${package_json_hash}
        fi
        if [ -z "${yarn_dependencies_hash:-}" ] || [ ! -f .yarn.${project}.hash ] || [ "${yarn_dependencies_hash}" != "$(cat .yarn.${project}.hash)" ]; then
            echo "Installing new Yarn dependencies"
            run_as_user "" yarn install --force ${yarn_proxy}
            # Re-hash after installing and persist for the next run
            yarn_dependencies_hash=$(find node_modules -type f ! -wholename 'node_modules/.cache/*' -print0 | sort -z | xargs -0 ${md5_command} | ${md5_command} | cut -c1-8)-${package_json_hash}
            echo ${yarn_dependencies_hash} > .yarn.${project}.hash
            echo "Saved ${yarn_dependencies_hash} to .yarn.${project}.hash"
        else
            echo "Yarn dependencies haven't changed. To force an update, delete .yarn.${project}.hash."
        fi
    fi

    # Install bower dependencies
    if [ -f bower.json ]; then
        bower_json_hash=$(${md5_command} bower.json | cut -c1-8)
        if [ -d bower_components ]; then
            bower_dependencies_hash=$(find bower_components -type f -print0 | sort -z | xargs -0 ${md5_command} | ${md5_command} | cut -c1-8)-${bower_json_hash}
        fi
        if [ -z "${bower_dependencies_hash:-}" ] || [ ! -f .bower.${project}.hash ] || [ "${bower_dependencies_hash}" != "$(cat .bower.${project}.hash)" ]; then
            echo "Installing new bower dependencies"
            run_as_user "" bower install
            bower_dependencies_hash=$(find bower_components -type f -print0 | sort -z | xargs -0 ${md5_command} | ${md5_command} | cut -c1-8)-${bower_json_hash}
            echo ${bower_dependencies_hash} > .bower.${project}.hash
        else
            echo "Bower dependencies haven't changed. To force an update, delete .bower.${project}.hash."
        fi
    fi

    # Install ruby dependencies
    if [ -f Gemfile ]; then
        gemfile_hash=$(${md5_command} Gemfile | cut -c1-8)
        if [ -d vendor/bundle ]; then
            bundler_dependencies_hash=$(find vendor/bundle -type f -print0 | sort -z | xargs -0 ${md5_command} | ${md5_command} | cut -c1-8)-${gemfile_hash}
        fi
        if [ -z "${bundler_dependencies_hash:-}" ] || [ ! -f .bundler.${project}.hash ] || [ "${bundler_dependencies_hash}" != "$(cat .bundler.${project}.hash)" ]; then
            echo "Installing new bundler dependencies"
            run_as_user "" bundle install --path vendor/bundle
            bundler_dependencies_hash=$(find vendor/bundle -type f -print0 | sort -z | xargs -0 ${md5_command} | ${md5_command} | cut -c1-8)-${gemfile_hash}
            echo ${bundler_dependencies_hash} > .bundler.${project}.hash
        else
            echo "Bundler dependencies haven't changed. To force an update, delete .bundler.${project}.hash."
        fi
    fi

    # Install pip dependecies
    # NB: python packages live inside the container image, so the installed
    # tree is hashed inside a container rather than on the host.
    if [ -f requirements.txt ]; then
        requirements_hash=$(${md5_command} requirements.txt | cut -c1-8)
        pip_dependencies_hash=$(docker run --volume ${etc_volume}:/etc ${dev_image} bash -c 'find $(find /usr/local/lib/ -maxdepth 1 -name "python*" -type d | sort | tail -n 1)/dist-packages -type f -print0 | sort -z | xargs -0 '${md5_command}' | '${md5_command}' | cut -c1-8')-${requirements_hash}
        if [ ! -f .pip.${project}.hash ] || [ "${pip_dependencies_hash}" != "$(cat .pip.${project}.hash)" ]; then
            echo "Installing new pip dependencies"
            docker_run "" pip3 install --requirement requirements.txt
            pip_dependencies_hash=$(docker run --volume ${etc_volume}:/etc ${dev_image} bash -c 'find $(find /usr/local/lib/ -maxdepth 1 -name "python*" -type d | sort | tail -n 1)/dist-packages -type f -print0 | sort -z | xargs -0 '${md5_command}' | '${md5_command}' | cut -c1-8')-${requirements_hash}
            echo ${pip_dependencies_hash} > .pip.${project}.hash
        else
            echo "Pip dependencies haven't changed. To force an update, delete .pip.${project}.hash."
        fi
    fi
}

# Find the requested command; an empty value falls through to "serve" below
run_command="${1:-}"
[ -z "${run_command}" ] || shift

# Do the real business
case $run_command in
    ""|"serve")
        # Default command: install dependencies, build, then run a dev server
        update_dependencies

        # Read optional arguments
        detach=""
        forward_ports=("")
        run_watcher=false
        while [[ -n "${1:-}" ]] && [[ "${1:0:1}" == "-" ]]; do
            key="$1"

            case $key in
                -d|--detach) detach="--detach" ;;
                -p|--port)
                    if [ -z "${2:-}" ]; then invalid "Missing port number. Usage: --port XXXX"; fi
                    PORT=${2}
                    shift
                ;;
                -f|--forward-port)
                    # Error message now names the right option (was "--port XXXX")
                    if [ -z "${2:-}" ]; then invalid "Missing port number. Usage: --forward-port XXXX"; fi
                    forward_ports+=("${2}")
                    shift
                ;;
                *) invalid "Option '${key}' not recognised." ;;
            esac
            shift
        done

        # Setup yarn dependencies
        if [ -f package.json ]; then
            run_as_user "${module_volumes}" yarn run build
        fi

        # Run watch command in the background
        # NOTE(review): run_watcher is never set to true anywhere in this
        # script, so this branch is currently dead code — confirm before removal.
        if ${run_watcher}; then
            if [ -z "${detach}" ];  then trap "kill_container ${project}-watch" EXIT; fi
            run_as_user "--detach" yarn run watch  # Run watch in the background
        fi

        # Publish any EXTRA_PORTS (comma- or space-separated) one-to-one
        publish_extra_ports=""
        if [ -n "${EXTRA_PORTS:-}" ]; then
            IFS=', ' read -r -a ports_array <<< "$EXTRA_PORTS"
            for extra_port in "${ports_array[@]}"; do
                publish_extra_ports="${publish_extra_ports} --publish $extra_port:$extra_port"
            done
        fi

        # Map each requested host port onto the container's serve port
        publish_forward_ports=""
        for forward_port in "${forward_ports[@]}"; do
            if [ -n "${forward_port}" ]; then
                publish_forward_ports="${publish_forward_ports} --publish ${forward_port}:${PORT}"
            fi
        done

        start_django_db

        # Run the serve container, publishing the port, and detaching if required
        run_as_user "--env PORT=${PORT} --publish ${PORT}:${PORT} ${publish_forward_ports} ${publish_extra_ports} ${detach} ${run_serve_docker_opts} ${module_volumes}" yarn run serve $*
    ;;
    "stop")
        # Kill every running container whose name contains the project name
        echo "Stopping all running containers for ${project}"
        running_containers="$(docker ps --quiet --filter name=${project})"
        if [ -z "${running_containers}" ]; then
            echo "No running containers found"
            exit 0
        fi
        docker kill ${running_containers}
    ;;
    "watch")
        # Build assets once, then watch for changes in the foreground;
        # optionally also rebuild a Jekyll site in the background.
        update_dependencies

        # Read optional arguments
        watch_site=false
        while [[ -n "${1:-}" ]] && [[ "${1:0:1}" == "-" ]]; do
            key="$1"

            case $key in
                -s|--watch-site)
                    # Error if not a jekyll site
                    if [ ! -f _config.yml ]; then
                        echo "Error: Not a Jekyll site";
                        exit 1;
                    fi
                    watch_site=true
                ;;
                *) invalid "Option '${key}' not recognised." ;;
            esac
            shift
        done
        # Background site watcher is cleaned up when this script exits
        if ${watch_site}; then
            trap "kill_container ${project}-watch-site" EXIT
            run_as_user "--detach" jekyll build --watch  # Run site watcher in the background
        fi
        run_as_user "${module_volumes}" yarn run build
        run_as_user "${module_volumes}" yarn run watch
    ;;
    "build")
        # Build assets; for Jekyll projects also build the site itself
        update_dependencies

        run_as_user "${module_volumes}" yarn run build

        if [ -f _config.yml ]; then
            # For jekyll sites
            run_as_user "" bundle exec jekyll build
        fi
    ;;
    "test")
        update_dependencies

        test_error=false

        # Run node tests, remembering failure so it can be reported below
        echo "- Running yarn tests"
        run_as_user "" yarn run test || test_error=true

        # Report success or failure
        if ${test_error}; then
            echo "==="
            echo "Tests failed"
            echo "==="
            exit 1
        else
            echo "==="
            echo "Tests succeeded"
            echo "==="
        fi
    ;;
    "test-python")
        update_dependencies

        test_error=false

        # Run python tests (via the yarn "test-python" script)
        echo "- Running python tests"
        run_as_user "" yarn run test-python || test_error=true

        # Report success or failure
        if ${test_error}; then
            echo "==="
            echo "Tests failed"
            echo "==="
            exit 1
        else
            echo "==="
            echo "Tests succeeded"
            echo "==="
        fi
    ;;
    "lint-python")
        update_dependencies

        lint_error=false

        # Run python lint (via the yarn "lint-python" script)
        echo "- Running python lint"
        run_as_user "" yarn run lint-python || lint_error=true

        # Report success or failure
        if ${lint_error}; then
            echo "==="
            echo "Lint failed"
            echo "==="
            exit 1
        else
            echo "==="
            echo "Lint succeeded"
            echo "==="
        fi
    ;;
    "clean")
        # Remove everything this script created for the project: hash files,
        # containers, volumes, networks and the .docker-project marker.
        echo "Remove hash files"
        rm -rf .*.hash

        echo "Running 'clean' yarn script"
        run_as_user "" yarn run clean || true  # Run the clean script

        echo "Removing docker objects for project: ${project}"

        # Containers must be removed before the volumes they use
        echo "- Removing containers using project volumes"
        project_volumes="$(docker volume ls --quiet --filter name=${project})"
        for volume in ${project_volumes}; do
            echo "  > Removing containers using volume ${volume}"
            containers_using_volume="$(docker ps --all --quiet --filter volume=${volume})"
            if [ -n "${containers_using_volume}" ]; then docker rm --force ${containers_using_volume}; fi
        done
        echo "- Removing project volumes"
        if [ -n "${project_volumes}" ]; then docker volume rm ${project_volumes}; fi

        echo "- Removing remaining project containers"
        project_containers="$(docker ps --all --quiet --filter name=${project})"
        if [ -n "${project_containers}" ]; then docker rm --force ${project_containers}; fi

        echo "- Removing project networks"
        project_networks="$(docker network ls --quiet --filter name=${project})"
        if [ -n "${project_networks}" ]; then docker network rm ${project_networks}; fi

        echo "Removing .docker-project file"
        rm -rf .docker-project  # Remove the project file
    ;;
    "clean-cache")
        # Clean node cache volume
        # The cache volume is shared across projects, so this affects them all
        echo "Removing cache volume ${cache_volume}"
        containers_using_volume=$(docker ps --quiet --all --filter "volume=${cache_volume}")
        if [ -n "${containers_using_volume}" ]; then docker rm --force ${containers_using_volume}; fi
        docker volume rm ${cache_volume}
    ;;
    "exec")
        # Run an arbitrary command inside the development container
        expose_ports=""
        run_as_root=false

        while [[ -n "${1:-}" ]] && [[ "${1:0:1}" == "-" ]]; do
            key="$1"

            case $key in
                -r|--root)
                    run_as_root=true
                ;;
                -p|--expose-port)
                    if [ -z "${2:-}" ]; then invalid "Missing port number. Usage: --expose-port XXXX"; fi
                    expose_ports="${expose_ports} --publish ${2}:${2}"
                    shift
                ;;
                *) invalid "Option '${key}' not recognised." ;;
            esac
            shift
        done

        update_dependencies
        start_django_db

        # --root skips the host-user mapping applied by run_as_user
        if ${run_as_root}; then
            docker_run "${expose_ports}" $@
        else
            run_as_user "${expose_ports}" $@
        fi
    ;;
    "yarn")
        # Run a yarn script from package.json inside the development container
        expose_ports=""
        run_as_root=false

        while [[ -n "${1:-}" ]] && [[ "${1:0:1}" == "-" ]]; do
            key="$1"

            case $key in
                -r|--root)
                    run_as_root=true
                ;;
                -p|--expose-port)
                    if [ -z "${2:-}" ]; then invalid "Missing port number. Usage: --expose-port XXXX"; fi
                    expose_ports="${expose_ports} --publish ${2}:${2}"
                    shift
                ;;
                *) invalid "Option '${key}' not recognised." ;;
            esac
            shift
        done

        update_dependencies
        start_django_db

        # --root skips the host-user mapping applied by run_as_user
        if ${run_as_root}; then
            docker_run "${expose_ports}" yarn run $@
        else
            run_as_user "${expose_ports}" yarn run $@
        fi
    ;;
    *) invalid "Command '${run_command}' not recognised." ;;
esac
