From 7abf51a9668a3cc992e75b34460673d9af611fdc Mon Sep 17 00:00:00 2001 From: Erik Dannenberg Date: Thu, 10 Jan 2019 18:27:11 +0100 Subject: [PATCH] Refactor project for 0.9.0 release It's a big one, probably incomplete changes list: * Raise Bash min version to 4.2, add jq as host dependency for Docker usage * Add a Kubler bash-completion script * Prepare Kubler for installation as system app * Add support for user managed custom commands/engines or overrides * Prepare for split of example images into a separate git repo * Minor refactor of the build process, -i now builds missing parents for example * Add configure_builder hook, replaces configure_bob. Both will be supported for a while though * The portage container is no longer updated via git, the amount of upstream changes is too large, snapshots all the way now per default * Run a Docker health-check or custom image test script as part of the build * Review/improve all ui output for consistency, also now in glorious 8bit color! * Add compact output mode, if enabled almost all cmd output is logged to file logs * Add dep-graph command to visualize image dependencies in dot/ascii/png * Add a few clean command options to remove built and dangling images * Improve 'new' command user handling to be less confusing * Lots of minor improvements and some fixes for edge cases See also #154 --- .travis.yml | 4 +- README.md | 223 ++++---- kubler.conf | 62 ++- kubler.sh | 110 +++- lib/argbash/clean.sh | 64 --- lib/bob-core/Dockerfile.template | 2 - lib/bob-core/acserver-push.sh | 23 - lib/bob-core/build-root.sh | 20 +- lib/bob-core/etc/acserver.yml | 10 - lib/{ => cmd}/argbash/README.md | 0 lib/{ => cmd}/argbash/argbash-refresh.sh | 0 lib/{ => cmd}/argbash/build.sh | 18 +- lib/cmd/argbash/clean.sh | 112 ++++ lib/cmd/argbash/dep-graph.sh | 125 +++++ lib/{ => cmd}/argbash/new.sh | 0 lib/{ => cmd}/argbash/opt-global.m4 | 0 lib/{ => cmd}/argbash/opt-main.sh | 9 +- lib/{ => cmd}/argbash/push.sh | 0 lib/{ => cmd}/argbash/update.sh 
| 0 lib/cmd/build.sh | 320 +++++++---- lib/cmd/clean.sh | 94 +++- lib/cmd/dep-graph.sh | 97 ++++ lib/cmd/new.sh | 149 ++--- lib/cmd/push.sh | 9 +- lib/cmd/update.sh | 81 +-- lib/core.sh | 527 ++++++++++-------- lib/engine/acbuild.sh | 167 ------ lib/engine/docker.sh | 510 ++++++++++++----- lib/engine/dummy.sh | 8 +- lib/kubler-completion.bash | 178 ++++++ lib/template/docker/builder/build.conf | 4 +- lib/template/docker/builder/build_ext.sh | 25 + .../builder/{build.sh => build_stage3.sh} | 11 +- lib/template/docker/image/Dockerfile.template | 3 + lib/template/docker/image/build-test.sh | 8 + lib/template/docker/image/build.conf | 28 +- lib/template/docker/image/build.sh | 2 +- .../docker/image/docker-healthcheck.sh | 8 + .../docker/namespace/kubler.conf.multi | 31 +- .../docker/namespace/kubler.conf.single | 6 +- lib/util.sh | 456 +++++++++++++++ 41 files changed, 2452 insertions(+), 1052 deletions(-) delete mode 100755 lib/argbash/clean.sh delete mode 100755 lib/bob-core/acserver-push.sh delete mode 100644 lib/bob-core/etc/acserver.yml rename lib/{ => cmd}/argbash/README.md (100%) rename lib/{ => cmd}/argbash/argbash-refresh.sh (100%) rename lib/{ => cmd}/argbash/build.sh (88%) create mode 100755 lib/cmd/argbash/clean.sh create mode 100755 lib/cmd/argbash/dep-graph.sh rename lib/{ => cmd}/argbash/new.sh (100%) rename lib/{ => cmd}/argbash/opt-global.m4 (100%) rename lib/{ => cmd}/argbash/opt-main.sh (89%) rename lib/{ => cmd}/argbash/push.sh (100%) rename lib/{ => cmd}/argbash/update.sh (100%) create mode 100644 lib/cmd/dep-graph.sh delete mode 100644 lib/engine/acbuild.sh create mode 100755 lib/kubler-completion.bash create mode 100644 lib/template/docker/builder/build_ext.sh rename lib/template/docker/builder/{build.sh => build_stage3.sh} (76%) create mode 100644 lib/template/docker/image/build-test.sh create mode 100644 lib/template/docker/image/docker-healthcheck.sh create mode 100644 lib/util.sh diff --git a/.travis.yml b/.travis.yml index e3215faa..34f5d05c 
100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,5 +9,5 @@ before_install: - docker pull koalaman/shellcheck script: - - docker run -w /scripts -v $(pwd):/scripts koalaman/shellcheck -x - kubler.sh lib/*.sh lib/engine/docker.sh lib/cmd/*.sh lib/bob-core/*.sh lib/argbash/argbash-refresh.sh + - docker run --rm -w /scripts -v $(pwd):/scripts koalaman/shellcheck -x + kubler.sh lib/*.sh lib/engine/docker.sh lib/cmd/*.sh lib/bob-core/*.sh lib/cmd/argbash/argbash-refresh.sh diff --git a/README.md b/README.md index 33546b06..8ad65720 100644 --- a/README.md +++ b/README.md @@ -10,174 +10,139 @@ and the German name Schmidt, the cooper trade is also the origin of German names > There is still demand for high-quality ~~wooden barrels~~ containers, and it is thought that the highest-quality ~~barrels~~ containers are those hand-made by professional ~~coopers~~ kublers. -At the core Kubler is just a simple ~~craftsman~~ bash script that, well, builds things.. and things that -can depend on other things. It does'nt really care all too much about the details as long as it gets -to build. So what, some ~~people~~ scripts just like to build things. Don't judge. +## Why Should You Care? -What kind of things? In theory your imagination is the limit, but we provide batteries for building -[Docker][] images, with [acbuild][] (read: rkt and OCI) support in the works. PR are always welcome. ;) +Perhaps: -Due to it's unrivaled flexibility [Gentoo][] is used under the hood as build container base, -however the final images hold just the runtime dependencies for selected software packages, resulting -in very slim images. To achieve this a 2 phase build process is employed, essentially the often requested, but -still missing, Docker feature for [nested](https://github.com/docker/docker/issues/7115) image builds. +1. You love Docker but are annoyed by some of the restrictions of it's `build` command that keep + getting into your way. 
Wouldn't it be nice if you could `build` your images with all `docker run` + args, like `-v`, at your disposal? +2. You are a SysAdmin or DevOps engineer who seeks complete governance for the contents of their + Docker images, with full control of the update cycle and the ability to track software version + changes across the board from a centralized vcs repository. +3. You need to manage a **lot** of Docker base/service images in a sane way and want peace of mind + with automated post-build tests. +4. You are a Gentoo user that wants to build slim Gentoo based images without having to wrestle + with CrossDev. +5. You are looking for an interactive OS host agnostic Gentoo playground or a portable ebuild + development environment. +6. You want to create custom root file systems, possibly for different cpu architectures, in a safe + and repeatable manner. -## Goals +## Cool. So What Exactly Is A Container Image Meta Builder? -* Central, organization-wide management of base images -* Containers should only contain the bare minimum to run - * Separate build and runtime dependencies - * Only deploy runtime dependencies -* Maximum flexibility while assembling the root file system, but with minimal effort -* Keep things maintainable as the stack grows +While Kubler was designed primarily for building and managing container images it doesn't +particularly care about the way those images are built. At the core it's just a glorified directory +crawler, with a simple dependency mechanism, that fires a command on a selected image or namespace +dependency graph. -## Status +The actual build logic is abstracted away into pluggable engines that may orchestrate other tools, +like Docker, to create the final image, or whatever the selected namespace's configured engine +produces. -* Stable for a while now and used in production -* Monthly update cycle for all reference images +Kubler is extendable, users may provide their own commands and/or build engines in a maintainable +way. 
As both are just plain old Bash scripts this is usually a simple* and straight forward process +with almost no limitations. -## Features +`{ font-size: 2px; }` * Additional rates of blood, sweat and tears may apply when implementing a new engine -* Decoupled build logic -* Maintain multiple image stacks with differing build engines -* Generic [root-fs][bob-core] build script to quickly bootstrap a [Gentoo][] based build container -* Utilizes Gentoo's [binary package][] features for quick rebuilds -* Simple hook system allows for full control of the build process while retaining modularity -* Generic image and builder dependencies that can be utilized by build engines -* Automated image [documentation][nginx-packages] and history when using a CVS +## Requirements -### Docker Features +#### Kubler -* Essentially enables [nested](https://github.com/docker/docker/issues/7115) docker builds -* Everything happens in docker containers except for some bash glue on the build host -* Glibc, Musl and Uclibc based build containers, each with complete toolchain out of the box -* Tiny static busybox-musl root image (~1.2mb), FROM scratch is fine too -* Shared layer support for final images, images are not squashed and can depend on other images -* [s6][] instead of [OpenRC][] as default supervisor (small footprint (<1mb) and proper docker SIGTERM handling), -optional of course -* Reference images are available on [docker hub][kubler-docker] -* Push built image stack(s) to a public or private docker registry +* Bash version 4.2+, using 4.4+ is highly recommended due to bugs in previous versions. -### Requirements +Optional: + +* GPG for download verification + +#### Docker Build Engine -* Bash 4.x * Working Docker setup +* GIT +* jq to parse Docker json output -Optional: +## Installation -* GPG for download verification +#### On Gentoo -Kubler has been tested on Gentoo, CoreOS and macOS. It should run on all Linux distributions. 
+An ebuild can be found at https://github.com/edannenberg/kubler-overlay/ -## How much do I save? +Add the overlay and install as usual: -* Quite a bit, the Nginx Docker image, for example, clocks in at ~17MB, compared to >1GB for a full Gentoo version -or ~300MB for a similiar Ubuntu version + emerge -av kubler -## Quick Start +#### Manual Installation - $ git clone https://github.com/edannenberg/kubler.git +Kubler has been tested on Gentoo, CoreOS and macOS. It should run on all Linux distributions. -Kubler needs a `working-dir` to operate from, much like `git` needs to be called from inside a git repo for most of its -functionality. You may also call Kubler from any sub directory and it will detect the proper path. The Kubler git repo -comes with an example image stack, let's build a provided `glibc` image: +1. Clone the repo or download/extract the release archive to a location of your choice, i.e. - $ cd kubler/ - $ ./kubler.sh build kubler/glibc + $ cd ~/tools/ + $ curl -L https://github.com/edannenberg/kubler/archive/master.tar.gz | tar xz -This will build a `kubler/busybox` and `kubler/glibc` image. You also get a glibc and musl based build container for -free, which you can utilize for your own images. +2. Add `kubler.sh` to your path -* You may add `kubler.sh` to your `PATH`, one-liner: `export PATH="${PATH}:/path/to/kubler/bin"` -* If you don't have GPG available use `build -s ..` to skip verification of downloaded files (SHA512 is still checked) -* The directories in `./dock/kubler/images/` contain image specific documentation +If you are unsure add the following at the end of your `~/.bashrc` file, don't forget to adjust the +path for each line accordingly: -Note: If you get a 404 error on downloading a Gentoo stage3 tar ball try running `kubler update` to resolve the issue. -The Gentoo servers only keep those files for a few weeks. 
+ export PATH="${PATH}:/path/to/kubler/bin" + # optional but highly recommended, adds bash completion support for all kubler commands + source /path/to/kubler/lib/kubler-completion.bash -The first run will take quite a bit of time, don't worry, once the build containers and binary package cache -are seeded future runs will be much faster. +Note: You will need to open a new shell for this to take effect, if this fails on a Linux SystemD +host re-logging might be required instead. -## Creating a new namespace +#### Uninstall -Images are kept in a `namespace` directory in `--working-dir`. You may have any number of namespaces. A helper is -provided to take care of the boiler plate for you: +1. Remove any build artifacts and Docker images created by Kubler: -``` - $ cd kubler/ - $ ./kubler.sh new namespace testing - - --> Who maintains the new namespace? - Name (John Doe): My Name - EMail (john@doe.net): my@mail.org - --> What type of images would you like to build? - Engine (docker): + $ kubler clean -N - --> Successfully added "testing" namespace at ./dock/testing +2. Delete the two entries from `~/.bashrc` you possibly added during manual installation - $ tree dock/testing/ - dock/testing/ - |-- .gitignore - |-- kubler.conf - .-- README.md -``` +3. Delete any namespace dirs and configured `KUBLER_DATA_DIR` (default is `~/.kubler/`) you had in + use, this may require su permissions. -You are now ready to work on your shiny new image stack. +## Quick Start -## Adding Docker images +#### The Basics -Let's create a [Figlet](http://www.figlet.org/) test image in our new namespace. If you chose a more -sensible namespace name above replace `testing` accordingly: +To get a quick overview/reminder of available commands/options run: -``` - $ ./kubler.sh new image testing/figlet + $ kubler --help - --> Extend an existing image? Fully qualified image id (i.e. 
kubler/busybox) if yes or scratch - Parent Image (scratch): kubler/glibc +To view details for specific command: - --> Successfully added testing/figlet image at ./dock/testing/images/figlet -``` + $ kubler build -h -We used `kubler/glibc` as parent image, or what you probably know as `FROM` in your `Dockerfiles`. -The namespace now looks like this: - -``` - $ tree dock/testing/ - dock/testing/ - |-- kubler.conf - |-- images - |   .-- figlet - |   |-- build.conf - |   |-- build.sh - |   |-- Dockerfile.template - |   .-- README.md - .-- README.md -``` +Almost all of Kubler's commands will need to be run from a `--working-dir`, if the option is +omitted the current working dir of the executing shell is used. It functions much like Git in that +regard, executing any Kubler command from a sub directory of a valid working dir will also work as +expected. -Edit the new image's build script located at `./dock/testing/images/figlet/build.sh` and add `app-misc/figlet` to the -`_packages` variable: +A `--working-dir` is considered valid if it has a `kubler.conf` file and either an `images/` dir or +one ore more namespace dirs, which are just a collection of images. -``` -_packages="app-misc/figlet" -``` +#### Setup A New Namespace -When it's time to build this will instruct the build container in the *first build phase* to install the given package(s) -from Gentoo's package tree at an empty directory. It's content is then exported to the host as a `rootfs.tar` file. -In the *second build phase* a normal Docker build is started and the `rootfs.tar` file is added to the final image. +First switch to a directory where you would like to store your Kubler managed images or namespaces: -See the 'how does it work' section below for more details on the build process. Also make sure to read the comments -in `build.sh`. 
But let's build the darn thing already: + $ cd ~/workspace -``` - $ ./kubler.sh build testing -``` +Then use the `new` command to take care of the boiler plate for you, choose `single` as namespace +type when asked: -Once that finishes we are ready to take the image for a test drive: + $ kubler new namespace mytest + $ cd mytest -``` - $ docker images | grep /figlet - $ docker run -it --rm kubler/figlet figlet kubler sends his regards -``` +### Hello Image + +To create a new image in the current working dir: + + $ kubler new image mytest/figlet + +#TODO: finish docs Some useful options for while working on an image: @@ -245,6 +210,18 @@ this preserves exact build state Build container names generally start with `*/bob`, when a new build container state is committed the current image name gets appended. For example `kubler/bob-openssl` refers to the container used to build the `kubler/openssl` image. +## Other Resources + +* An excellent blog post, written by [@berney][], can be found at https://www.elttam.com.au/blog/kubler/ + +## Discord Community + +For questions or chatting with other users you may join our Discord server at: + +https://discord.gg/rH9R7bc + +You just need a username, email verification with Discord is not required. + ## Thanks [@wking][] for his [gentoo-docker][] repo which served as an excellent starting point diff --git a/kubler.conf b/kubler.conf index ddd0ce93..8e787b20 100644 --- a/kubler.conf +++ b/kubler.conf @@ -1,33 +1,59 @@ -AUTHOR="${AUTHOR:-Erik Dannenberg }" -# Global tag (a.k.a version) for all images -IMAGE_TAG="${IMAGE_TAG:-20181130}" -# Cheeky example that always sets the current date: -#IMAGE_TAG="${IMAGE_TAG:-$(date +%Y%m%d)}" - -# Portage snapshot date that is used to bootstrap the portage container -PORTAGE_DATE="${PORTAGE_DATE:-latest}" +# The first config file read by Kubler. System wide settings, users may override via KUBLER_DATA_DIR/kubler.conf or +# current --working-dir. 
+ +# Image version, ideally set via user config so it's shared by all --working-dirs of current user +#IMAGE_TAG='20190123' +# Default maintainer, override via namespace kubler.conf +AUTHOR='Erik Dannenberg ' + +# Kubler's runtime data dir, needs to be writable by the current user +#KUBLER_DATA_DIR="${HOME}/.kubler" +# Gentoo's stage3 and portage files download location +#KUBLER_DOWNLOAD_DIR="${KUBLER_DATA_DIR}/downloads" +# Gentoo's build related downloads, like source code, patches, etc +#KUBLER_DISTFILES_DIR="${KUBLER_DATA_DIR}/distfiles" +# Gentoo's binary package cache location +#KUBLER_PACKAGES_DIR="${KUBLER_DATA_DIR}/packages" + +# Output related config +# Disable compact output, effectively always passes -v to all commands +#KUBLER_VERBOSE='false' +# If true and compact output is enabled send output to log file instead of /dev/null +#KUBLER_CMD_LOG='true' +# Shall we ring the terminal bell on error? +#KUBLER_BELL_ON_ERROR='true' +# Disabling this only works when set as ENV before starting Kubler +#KUBLER_COLORS='true' + +# Update the portage container via git. Not recommended as it can be quite slow due to the amount of upstream changes. 
+#KUBLER_PORTAGE_GIT='false' +# Not recommended unless you are building your image stack from scratch and with your own stage3 build containers +#KUBLER_DISABLE_KUBLER_NS='false' +# Effectively always enables -s for the build command +#KUBLER_DISABLE_GPG='false' + +# Portage snapshot date that is used to bootstrap the portage container, 'latest' is highly recommended +#PORTAGE_DATE='latest' # Download location for stage3 and Portage files, use whitespace to set multiple servers # You may visit https://www.gentoo.org/downloads/mirrors/ and pick a http or ftp url near your physical location -MIRROR="${MIRROR:-http://distfiles.gentoo.org/}" - -DOWNLOAD_PATH="${DOWNLOAD_PATH:-${_KUBLER_DIR}/tmp/downloads}" +MIRROR='http://distfiles.gentoo.org/' # You can also define these per namespace conf -BUILD_ENGINE="${BUILD_ENGINE:-docker}" -DEFAULT_BUILDER="${DEFAULT_BUILDER:-kubler/bob}" +BUILD_ENGINE='docker' +DEFAULT_BUILDER='kubler/bob' # Variables starting with BOB_ are exported as ENV to all build containers # init Portage's make.conf defaults -BOB_GENTOO_MIRRORS="${BOB_GENTOO_MIRRORS:-${MIRROR}}" +BOB_GENTOO_MIRRORS="${MIRROR}" -BOB_FEATURES="${BOB_FEATURES:-parallel-fetch nodoc noinfo noman binpkg-multi-instance}" -BOB_EMERGE_DEFAULT_OPTS="${BOB_EMERGE_DEFAULT_OPTS:--b -k --binpkg-respect-use=y}" +BOB_FEATURES='-parallel-fetch nodoc noinfo noman binpkg-multi-instance' +BOB_EMERGE_DEFAULT_OPTS='-b -k --binpkg-respect-use=y' # Timezone for build containers -BOB_TIMEZONE="${BOB_TIMEZONE:-UTC}" +BOB_TIMEZONE='UTC' # Options passed on to the make jobs launched from Portage # -jX = number of cpu cores used for compiling, rule of thumb: amount_of_cores+1, i.e. -j9 -BOB_MAKEOPTS="${BOB_MAKEOPTS:--j9}" +BOB_MAKEOPTS='-j9' diff --git a/kubler.sh b/kubler.sh index 7014fa5c..715ba94b 100755 --- a/kubler.sh +++ b/kubler.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the @@ -10,7 +10,7 @@ # disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -# following disclaimer in the documentation and/or other materials provided with the distribution. +# following disclaimer in the documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -29,6 +29,9 @@ # global : _some_var # function return : __function_name +readonly _KUBLER_VERSION=0.9.0-beta +readonly _KUBLER_BASH_MIN=4.2 + # shellcheck disable=SC1004 _help_header=' __ ___. .__ | | ____ _\_ |__ | | ___________ @@ -41,14 +44,16 @@ function show_help() { local help_commands header_current_cmd help_commands="Commands: -build - Build image(s) or namespace(s) -clean - Remove build artifacts, like rootfs.tar, from all namespaces -new - Create a new namespace, image or builder -push - Push image(s) or namespace(s) to a registry -update - Check for stage3 updates and sync portage container +build - Build image(s) or namespace(s) +clean - Remove build artifacts and/or delete built images +dep-graph - Visualize image dependencies +new - Create a new namespace, image or builder +push - Push image(s) or namespace(s) to a registry +update - Check for new stage3 releases and kubler namespace updates -${_KUBLER_BIN} --help for more information\\n" +${_KUBLER_BIN} --help for more information on specific commands\\n" # shellcheck disable=SC2154 + header_current_cmd="${_KUBLER_VERSION}" [[ "${_is_valid_cmd}" == 'true' ]] && header_current_cmd=" ${_arg_command}" echo -e "${_help_header}${header_current_cmd}\\n" [[ -n "${_help_command_description}" ]] && echo -e 
"${_help_command_description}\\n" @@ -78,26 +83,71 @@ function get_absolute_path() { done # handle ./ or ../ regex='^[.]{1,2}\/?$' - [[ "${path_in}" =~ $regex ]] && path_out="$(dirname "${path_out}")" + [[ "${path_in}" =~ ${regex} ]] && path_out="$(dirname "${path_out}")" # and once more if ../ regex='^[.]{2}\/?$' - [[ "${path_in}" =~ $regex ]] && path_out="$(dirname "${path_out}")" + [[ "${path_in}" =~ ${regex} ]] && path_out="$(dirname "${path_out}")" __get_absolute_path="${path_out}" } +# https://stackoverflow.com/a/44660519/5731095 +# Compares two tuple-based, dot-delimited version numbers a and b (possibly +# with arbitrary string suffixes). Returns: +# 1 if ab +# Everything after the first character not in [0-9.] is compared +# lexicographically using ASCII ordering if the tuple-based versions are equal. +# +# Arguments: +# 1: version_one +# 2: version_two +function compare_versions() { + if [[ "$1" == "$2" ]]; then + return 2 + fi + local IFS=. + # shellcheck disable=SC2206 + local i a=(${1%%[^0-9.]*}) b=(${2%%[^0-9.]*}) + local arem=${1#${1%%[^0-9.]*}} brem=${2#${2%%[^0-9.]*}} + for ((i=0; i<${#a[@]} || i<${#b[@]}; i++)); do + if ((10#${a[i]:-0} < 10#${b[i]:-0})); then + return 1 + elif ((10#${a[i]:-0} > 10#${b[i]:-0})); then + return 3 + fi + done + if [ "$arem" '<' "$brem" ]; then + return 1 + elif [ "$arem" '>' "$brem" ]; then + return 3 + fi + return 2 +} + # Arguments: # 1: exit_message as string # 2: exit_code as int, optional, default: 1 function die() { - local exit_code + local exit_message exit_code + exit_message="$1" exit_code="${2:-1}" [[ "$_PRINT_HELP" = 'yes' ]] && show_help >&2 - echo -e 'fatal:' "$1" >&2 + if [[ -n "${exit_message}" ]]; then + if declare -F msg_error &>/dev/null; then + msg_error "fatal: ${exit_message}" >&2 + else + echo -e 'fatal:' "${exit_message}" >&2 + fi + fi + [[ "${KUBLER_BELL_ON_ERROR}" == 'true' ]] && tput bel + _kubler_internal_abort='true' exit "${exit_code}" } function main() { - (( ${BASH_VERSION%%.*} >= 4 )) 
|| die "Kubler needs Bash version 4 or greater, only found version ${BASH_VERSION}." + compare_versions "${BASH_VERSION}" "${_KUBLER_BASH_MIN}" + [[ $? -eq 1 ]] && die "Kubler needs Bash version ${_KUBLER_BASH_MIN} or greater, installed is ${BASH_VERSION}." get_absolute_path "$0" [[ -z "${__get_absolute_path}" ]] && die "Couldn't determine the script's real directory, aborting" 2 @@ -114,14 +164,18 @@ function main() { [[ -d "${lib_dir}" ]] || die "Couldn't find ${lib_dir}" 2 readonly _LIB_DIR="${lib_dir}" + KUBLER_DATA_DIR="${KUBLER_DATA_DIR:-${HOME}/.kubler}" + [[ ! -d "${KUBLER_DATA_DIR}" ]] && mkdir -p "${KUBLER_DATA_DIR}"/{cmd/argbash,engine,log,ns,tmp} + [[ ! -d "${KUBLER_DATA_DIR}" ]] && die "Couldn't create KUBLER_DATA_DIR at ${KUBLER_DATA_DIR}" + core="${_LIB_DIR}"/core.sh [[ -f "${core}" ]] || die "Couldn't read ${core}" 2 # shellcheck source=lib/core.sh source "${core}" # parse main args - parser="${_LIB_DIR}"/argbash/opt-main.sh - # shellcheck source=lib/argbash/opt-main.sh + parser="${_LIB_DIR}"/cmd/argbash/opt-main.sh + # shellcheck source=lib/cmd/argbash/opt-main.sh file_exists_or_die "${parser}" && source "${parser}" if [[ "${_arg_debug}" == 'on' ]]; then @@ -131,36 +185,44 @@ function main() { readonly BOB_IS_DEBUG='false' fi - # handle --help for main script - [[ -z "${_arg_command}" && "${_arg_help}" == 'on' ]] && { show_help; exit 0; } - # KUBLER_WORKING_DIR overrides --working-dir, else use current working directory get_absolute_path "${KUBLER_WORKING_DIR:-${_arg_working_dir}}" working_dir="${__get_absolute_path}" [[ -z "${working_dir}" ]] && working_dir="${PWD}" detect_namespace "${working_dir}" - + + # handle --help for main script + [[ -z "${_arg_command}" && "${_arg_help}" == 'on' ]] && { bc_helper; show_help; exit 0; } + if [[ -n "${_arg_working_dir}" ]]; then # shellcheck disable=SC2034 readonly _KUBLER_BIN_HINT=" --working-dir=${working_dir}" fi # valid command? 
- cmd_script="${_LIB_DIR}/cmd/${_arg_command}.sh" - [[ -f "${cmd_script}" ]] || { show_help; die "Unknown command, ${_arg_command}" 5; } + get_include_path "cmd/${_arg_command}.sh" || { show_help; die "Unknown command, ${_arg_command}" 5; } + cmd_script="${__get_include_path}" _is_valid_cmd='true' # parse command args if a matching parser exists - parser="${_LIB_DIR}/argbash/${_arg_command}.sh" - # shellcheck source=lib/argbash/build.sh + get_include_path "cmd/argbash/${_arg_command}.sh" + parser="${__get_include_path}" + # shellcheck source=lib/cmd/argbash/build.sh [[ -f "${parser}" ]] && source "${parser}" "${_arg_leftovers[@]}" + # for this setting env overrides args + [[ "${KUBLER_VERBOSE}" == 'true' ]] && _arg_verbose='on' + # handle --help for command script [[ "${_arg_help}" == 'on' ]] && { show_help; exit 0; } - # run command + [[ "${_arg_verbose}" == 'off' ]] && file_exists_and_truncate "${_KUBLER_LOG_DIR}/${_arg_command}.log" + + # run the selected command + trap "{ kubler_abort_handler; }" EXIT # shellcheck source=lib/cmd/build.sh source "${cmd_script}" "${_arg_leftovers[@]}" + trap ' ' EXIT } main "$@" diff --git a/lib/argbash/clean.sh b/lib/argbash/clean.sh deleted file mode 100755 index 952f89dd..00000000 --- a/lib/argbash/clean.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -_help_command_description="Remove build artifacts like rootfs.tar from all namespaces" - -# ARG_HELP([]) -# ARGBASH_WRAP([opt-global]) -# ARGBASH_SET_INDENT([ ]) -# ARGBASH_GO() -# needed because of Argbash --> m4_ignore([ -### START OF CODE GENERATED BY Argbash v2.3.0 one line above ### -# Argbash is a bash code generator used to get arguments parsing right. 
-# Argbash is FREE SOFTWARE, see https://argbash.io for more info - -# THE DEFAULTS INITIALIZATION - OPTIONALS -_arg_working_dir= -_arg_debug=off - -print_help () -{ - printf 'Usage: %s clean [-w|--working-dir ] [--debug]\n' "${_KUBLER_BIN}" - printf "\t%s\n" "-h,--help: Prints help" - printf "\t%s\n" "-w,--working-dir: Where to look for namespaces or images, default: current directory" -} - -# THE PARSING ITSELF -while test $# -gt 0 -do - _key="$1" - case "$_key" in - -h*|--help) - print_help - exit 0 - ;; - -w*|--working-dir|--working-dir=*) - _val="${_key##--working-dir=}" - _val2="${_key##-w}" - if test "$_val" = "$_key" - then - test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 - _val="$2" - shift - elif test "$_val2" != "$_key" -a -n "$_val2" - then - _val="$_val2" - fi - _arg_working_dir="$_val" - _args_opt_global_opt+=("${_key%%=*}" "$_arg_working_dir") - ;; - --no-debug|--debug) - _arg_debug="on" - _args_opt_global_opt+=("${_key%%=*}") - test "${1:0:5}" = "--no-" && _arg_debug="off" - ;; - *) - _PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1 - ;; - esac - shift -done - -# OTHER STUFF GENERATED BY Argbash -_args_opt_global=("${_args_opt_global_opt[@]}" "${_args_opt_global_pos[@]}") - -### END OF CODE GENERATED BY Argbash (sortof) ### ]) diff --git a/lib/bob-core/Dockerfile.template b/lib/bob-core/Dockerfile.template index a693f734..e2e821ab 100644 --- a/lib/bob-core/Dockerfile.template +++ b/lib/bob-core/Dockerfile.template @@ -25,8 +25,6 @@ COPY build-root.sh /usr/local/bin/kubler-build-root COPY bashrc.sh /root/.bashrc -COPY acserver-push.sh /usr/local/bin/acserver-push - COPY portage-git-sync.sh /usr/local/bin/portage-git-sync CMD ["/bin/bash"] diff --git a/lib/bob-core/acserver-push.sh b/lib/bob-core/acserver-push.sh deleted file mode 100755 index 014ac70b..00000000 --- a/lib/bob-core/acserver-push.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -function main() { - local image_id image_path 
image_name manifest_name registry_host completed_url upload_id - image_id="$1" - image_path="$2" - image_name=$(basename -- "${image_path}") - manifest_name="${image_name/.aci/.manifest}" - registry_host='localhost' - - completed_url=$(curl -s -X POST -H "Content-Type: application/json" \ - -d "\\{\"image\":${image_id}\\}" "${registry_host}/${image_id}/startupload" | jq -r '.completed_url') - upload_id="${completed_url#*/complete/}" - - echo "uploading ${image_name}" - curl "${registry_host}/aci/${upload_id}" --upload-file "${image_path}" - echo "uploading ${manifest_name}" - curl "${registry_host}/manifest/${upload_id}" --upload-file "${manifest_name}" - echo "finish upload" - curl -X POST -H "Content-Type: application/json" -d '{"success":true}' "${registry_host}/complete/${upload_id}" -} - -main "$@" diff --git a/lib/bob-core/build-root.sh b/lib/bob-core/build-root.sh index e00b4a38..7fe9028d 100755 --- a/lib/bob-core/build-root.sh +++ b/lib/bob-core/build-root.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the @@ -10,7 +10,7 @@ # disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -# following disclaimer in the documentation and/or other materials provided with the distribution. +# following disclaimer in the documentation and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -547,8 +547,13 @@ function build_rootfs() { # shellcheck disable=SC1091 source /etc/profile - # call configure bob hook if declared in build.sh - declare -F configure_bob &>/dev/null && configure_bob + # call configure_builder hook if declared in build.sh + if declare -F configure_builder &>/dev/null; then + configure_builder + elif declare -F configure_bob &>/dev/null; then + # deprecated, but still supported for a while + configure_bob + fi # switch back to BOB_{CHOST,CFLAGS,CXXFLAGS} unset USE_BUILDER_FLAGS @@ -653,13 +658,16 @@ function build_rootfs() { local lib_dir for lib_dir in "${_EMERGE_ROOT}"/{"${_LIB}",usr/"${_LIB}"}; do - if [[ -z "${_keep_static_libs}" ]] && [[ -d "${lib_dir}" ]] && [[ "$(ls -A "${lib_dir}")" ]]; then + if [[ -z "${_keep_static_libs}" ]] && [[ -d "${lib_dir}" ]] && [[ -n "$(ls -A "${lib_dir}")" ]]; then find "${lib_dir}"/* -type f -name "*.a" -delete fi done + # just for less noise in the build output + eselect news read new 1> /dev/null + # if this is not an interactive build create the tar ball and clean up - if [[ -z "${BOB_IS_INTERACTIVE}" && "$(ls -A "${_EMERGE_ROOT}")" ]]; then + if [[ -z "${BOB_IS_INTERACTIVE}" && -n "$(ls -A "${_EMERGE_ROOT}")" ]]; then # make rootfs tar ball and copy to host tar -cpf "${_CONFIG}/rootfs.tar" -C "${_EMERGE_ROOT}" . 
chown "${BOB_HOST_UID}":"${BOB_HOST_GID}" "${_CONFIG}/rootfs.tar" diff --git a/lib/bob-core/etc/acserver.yml b/lib/bob-core/etc/acserver.yml deleted file mode 100644 index 37b6a7ba..00000000 --- a/lib/bob-core/etc/acserver.yml +++ /dev/null @@ -1,10 +0,0 @@ -api: - serverName: # if not provided, use dns of http requests - port: 80 - https: false - #username: # disable basic auth security if not provided - #password: -STORAGE: - rootPath: /oci-registry # where to store acis - unsigned: true # support unsigned acis - allowOverride: true # allow overriding aci that already exists in store diff --git a/lib/argbash/README.md b/lib/cmd/argbash/README.md similarity index 100% rename from lib/argbash/README.md rename to lib/cmd/argbash/README.md diff --git a/lib/argbash/argbash-refresh.sh b/lib/cmd/argbash/argbash-refresh.sh similarity index 100% rename from lib/argbash/argbash-refresh.sh rename to lib/cmd/argbash/argbash-refresh.sh diff --git a/lib/argbash/build.sh b/lib/cmd/argbash/build.sh similarity index 88% rename from lib/argbash/build.sh rename to lib/cmd/argbash/build.sh index 0e3032b7..3a0ee25c 100755 --- a/lib/argbash/build.sh +++ b/lib/cmd/argbash/build.sh @@ -26,6 +26,7 @@ _positionals=() _arg_target_id=('' ) # THE DEFAULTS INITIALIZATION - OPTIONALS _arg_interactive=off +_arg_interactive_no_deps=off _arg_no_deps=off _arg_force_image_build=off _arg_force_full_image_build=off @@ -39,9 +40,10 @@ _arg_debug=off print_help () { - printf 'Usage: %s build [--interactive] [--no-deps] [--force-image-build] [--force-full-image-build] [--clear-build-container] [--clear-everything] [--skip-gpg-check] [-e|--exclude ] [-w|--working-dir ] [--debug] [] ... [] ...\n' "${_KUBLER_BIN}" + printf 'Usage: %s build [--interactive] [--interactive-no-deps] [--no-deps] [--force-image-build] [--force-full-image-build] [--clear-build-container] [--clear-everything] [--skip-gpg-check] [-e|--exclude ] [-w|--working-dir ] [--debug] [] ... 
[] ...\n' "${_KUBLER_BIN}" printf "\t%s\n" ": Namespace or image to build, i.e. myns or myns/myimage" - printf "\t%s\n" "-i,--interactive: Starts an interactive phase 1 build container. Note: It's parent image/builder has to be built already" + printf "\t%s\n" "-i,--interactive: Starts an interactive phase 1 build container." + printf "\t%s\n" "-I,--interactive-no-deps: Same as -i but skip any parents. Note: The parent image/builder has to be built already" printf "\t%s\n" "-n,--no-deps: Ignore all parent images and only build passed target-id(s), needs fully qualified target_ids" printf "\t%s\n" "-f,--force-image-build: Rebuild any existing images of current dependency graph" printf "\t%s\n" "-F,--force-full-image-build: Same as -f but also repeat the first build phase if a cached rootfs.tar exists" @@ -65,6 +67,12 @@ do test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-i" "-${_next}" "$@" test "${1:0:5}" = "--no-" && _arg_interactive="off" ;; + -I*|--no-interactive-no-deps|--interactive-no-deps) + _arg_interactive_no_deps="on" + _next="${_key##-I}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-I" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_interactive_no_deps="off" + ;; -n*|--no-no-deps|--no-deps) _arg_no_deps="on" _next="${_key##-n}" @@ -101,10 +109,10 @@ do test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-s" "-${_next}" "$@" test "${1:0:5}" = "--no-" && _arg_skip_gpg_check="off" ;; - -v*|--no-verbose-build|--verbose-build) + -V*|--no-verbose-build|--verbose-build) _arg_verbose_build="on" - _next="${_key##-v}" - test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-v" "-${_next}" "$@" + _next="${_key##-V}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-V" "-${_next}" "$@" test "${1:0:5}" = "--no-" && _arg_verbose_build="off" ;; -e*|--exclude|--exclude=*) diff --git a/lib/cmd/argbash/clean.sh b/lib/cmd/argbash/clean.sh new file mode 100755 index 00000000..e64ac19d --- 
/dev/null +++ b/lib/cmd/argbash/clean.sh @@ -0,0 +1,112 @@ +#!/bin/bash + +_help_command_description="Delete build artifacts like rootfs.tar from all namespaces and/or delete built docker images" + +# ARG_HELP([]) +# ARGBASH_WRAP([opt-global]) +# ARGBASH_SET_INDENT([ ]) +# ARGBASH_GO() +# needed because of Argbash --> m4_ignore([ +### START OF CODE GENERATED BY Argbash v2.3.0 one line above ### +# Argbash is a bash code generator used to get arguments parsing right. +# Argbash is FREE SOFTWARE, see https://argbash.io for more info + +# THE DEFAULTS INITIALIZATION - OPTIONALS +_arg_build_artifacts=off +_arg_prune_dangling_images=off +_arg_image_ns=() +_arg_all_images=off +_arg_nuke_from_orbit=off +_arg_working_dir= +_arg_debug=off + +print_help () +{ + printf 'Usage: %s clean [-b|--build-artifacts] [-p|--prune-dangling-images] [-i|--image-ns ] [-I|--all-images] [-N|--nuke-from-orbit] [-w|--working-dir ]\n' "${_KUBLER_BIN}" + printf "\t%s\n" "-b,--build-artifacts: Delete rootfs.tar, Dockerfile and PACKAGES.md files, this is the default and can be omitted" + printf "\t%s\n" "-p,--prune-dangling-images: Run docker image prune" + printf "\t%s\n" "-i,--image-ns: Delete all Docker images for given namespace (repeatable)" + printf "\t%s\n" "-I,--all-images: Delete all Kubler related images for all namespaces, except stage3 base images and portage" + printf "\t%s\n" "-N,--nuke-from-orbit: Same as activating all of the above options, also deletes stage3 base images and portage" + printf "\t%s\n" "-w,--working-dir: Where to look for namespaces or images, default: current directory" + printf "\t%s\n" "-h,--help: Prints help" +} + +# THE PARSING ITSELF +while test $# -gt 0 +do + _key="$1" + case "$_key" in + -b*|--no-build-artifacts|--build-artifacts) + _arg_build_artifacts="on" + _next="${_key##-b}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-b" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_build_artifacts="off" + ;; + 
-p*|--no-prune-dangling-images|--prune-dangling-images) + _arg_prune_dangling_images="on" + _next="${_key##-p}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-p" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_prune_dangling_images="off" + ;; + -i*|--image-ns|--image-ns=*) + _val="${_key##--image-ns=}" + _val2="${_key##-i}" + if test "$_val" = "$_key" + then + test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 + _val="$2" + shift + elif test "$_val2" != "$_key" -a -n "$_val2" + then + _val="$_val2" + fi + _arg_image_ns+=("$_val") + ;; + -I*|--no-all-images|--all-images) + _arg_all_images="on" + _next="${_key##-I}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-I" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_all_images="off" + ;; + -N*|--no-nuke-from-orbit|--nuke-from-orbit) + _arg_nuke_from_orbit="on" + _next="${_key##-N}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-N" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_nuke_from_orbit="off" + ;; + -h*|--help) + print_help + exit 0 + ;; + -w*|--working-dir|--working-dir=*) + _val="${_key##--working-dir=}" + _val2="${_key##-w}" + if test "$_val" = "$_key" + then + test $# -lt 2 && die "Missing value for the optional argument '$_key'." 
1 + _val="$2" + shift + elif test "$_val2" != "$_key" -a -n "$_val2" + then + _val="$_val2" + fi + _arg_working_dir="$_val" + _args_opt_global_opt+=("${_key%%=*}" "$_arg_working_dir") + ;; + --no-debug|--debug) + _arg_debug="on" + _args_opt_global_opt+=("${_key%%=*}") + test "${1:0:5}" = "--no-" && _arg_debug="off" + ;; + *) + _PRINT_HELP=yes die "FATAL ERROR: Got an unexpected argument '$1'" 1 + ;; + esac + shift +done + +# OTHER STUFF GENERATED BY Argbash +_args_opt_global=("${_args_opt_global_opt[@]}" "${_args_opt_global_pos[@]}") + +### END OF CODE GENERATED BY Argbash (sortof) ### ]) diff --git a/lib/cmd/argbash/dep-graph.sh b/lib/cmd/argbash/dep-graph.sh new file mode 100755 index 00000000..2a3c4592 --- /dev/null +++ b/lib/cmd/argbash/dep-graph.sh @@ -0,0 +1,125 @@ +#!/bin/bash + +_help_command_description="Visualize image dependencies via dot graph markup" + +# ARG_POSITIONAL_INF([target-id],[Namespace or image to graph, i.e. myns or myns/myimage],[1]) +# ARG_OPTIONAL_BOOLEAN([verbose-build],[v],[Show all build output]) +# ARG_HELP([]) +# ARGBASH_WRAP([opt-global]) +# ARGBASH_SET_INDENT([ ]) +# ARGBASH_GO() +# needed because of Argbash --> m4_ignore([ +### START OF CODE GENERATED BY Argbash v2.3.0 one line above ### +# Argbash is a bash code generator used to get arguments parsing right. +# Argbash is FREE SOFTWARE, see https://argbash.io for more info + +# THE DEFAULTS INITIALIZATION - POSITIONALS +_positionals=() +_arg_target_id=('' ) +# THE DEFAULTS INITIALIZATION - OPTIONALS +_arg_as_raw_dot=off +_arg_as_ascii=off +_arg_as_boxart=off +_arg_output= +_arg_working_dir= +_arg_debug=off + +print_help () +{ + printf 'Usage: %s dep-graph [-d|--as-raw-dot] [-a|--as-ascii] [-b|--as-boxart] [-o|--output-file] [-w|--working-dir ] [--debug] [] ... [] ...\n' "${_KUBLER_BIN}" + printf "\t%s\n" ": Namespace or image for the graph, i.e. 
myns or myns/myimage" + printf "\t%s\n" "-d,--as-dot: Output dot markup only" + printf "\t%s\n" "-a,--as-ascii: ASCII output via kubler/graph-easy image" + printf "\t%s\n" "-b,--as-boxart: Unicode ASCII output kubler/graph-easy image" + printf "\t%s\n" "-o,--output-file: Specify an output file, if omitted stdout is used for ascii/boxart" + printf "\t%s\n" "-h,--help: Prints help" + printf "\t%s\n" "-w,--working-dir: Where to look for namespaces or images, default: current directory" +} + +# THE PARSING ITSELF +while test $# -gt 0 +do + _key="$1" + case "$_key" in + -r*|--no-as-raw-dot|--as-raw-dot) + _arg_as_raw_dot="on" + _next="${_key##-r}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-r" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_as_raw_dot="off" + ;; + -a*|--no-as-ascii|--as-ascii) + _arg_as_ascii="on" + _next="${_key##-a}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-a" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_as_ascii="off" + ;; + -b*|--no-as-boxart|--as-boxart) + _arg_as_boxart="on" + _next="${_key##-b}" + test -n "$_next" && test "$_next" != "$_key" && shift && set -- "-b" "-${_next}" "$@" + test "${1:0:5}" = "--no-" && _arg_as_boxart="off" + ;; + -h*|--help) + print_help + exit 0 + ;; + -o*|--output-file|--output-file=*) + _val="${_key##--output-file=}" + _val2="${_key##-o}" + if test "$_val" = "$_key" + then + test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 + _val="$2" + shift + elif test "$_val2" != "$_key" -a -n "$_val2" + then + _val="$_val2" + fi + _arg_output_file="$_val" + _args_opt_global_opt+=("${_key%%=*}" "$_arg_output_file") + ;; + -w*|--working-dir|--working-dir=*) + _val="${_key##--working-dir=}" + _val2="${_key##-w}" + if test "$_val" = "$_key" + then + test $# -lt 2 && die "Missing value for the optional argument '$_key'." 
1 + _val="$2" + shift + elif test "$_val2" != "$_key" -a -n "$_val2" + then + _val="$_val2" + fi + _arg_working_dir="$_val" + _args_opt_global_opt+=("${_key%%=*}" "$_arg_working_dir") + ;; + --no-debug|--debug) + _arg_debug="on" + _args_opt_global_opt+=("${_key%%=*}") + test "${1:0:5}" = "--no-" && _arg_debug="off" + ;; + *) + _positionals+=("$1") + ;; + esac + shift +done + +_positional_names=('_arg_target_id' ) +_required_args_string="'target-id'" +[[ ${_arg_help} != on ]] && test ${#_positionals[@]} -lt 1 && _PRINT_HELP=yes die "Not enough positional arguments - we require at least 1 (namely: $_required_args_string), but got only ${#_positionals[@]}." 1 +_our_args=$((${#_positionals[@]} - ${#_positional_names[@]})) +for (( ii = 0; ii < _our_args; ii++)) +do + _positional_names+=("_arg_target_id[(($ii + 1))]") +done + +for (( ii = 0; ii < ${#_positionals[@]}; ii++)) +do + eval "${_positional_names[ii]}=\${_positionals[ii]}" || die "Error during argument parsing, possibly an Argbash bug." 1 +done + +# OTHER STUFF GENERATED BY Argbash +_args_opt_global=("${_args_opt_global_opt[@]}" "${_args_opt_global_pos[@]}") + +### END OF CODE GENERATED BY Argbash (sortof) ### ]) diff --git a/lib/argbash/new.sh b/lib/cmd/argbash/new.sh similarity index 100% rename from lib/argbash/new.sh rename to lib/cmd/argbash/new.sh diff --git a/lib/argbash/opt-global.m4 b/lib/cmd/argbash/opt-global.m4 similarity index 100% rename from lib/argbash/opt-global.m4 rename to lib/cmd/argbash/opt-global.m4 diff --git a/lib/argbash/opt-main.sh b/lib/cmd/argbash/opt-main.sh similarity index 89% rename from lib/argbash/opt-main.sh rename to lib/cmd/argbash/opt-main.sh index 012d84d0..a818a65a 100755 --- a/lib/argbash/opt-main.sh +++ b/lib/cmd/argbash/opt-main.sh @@ -20,6 +20,7 @@ _arg_leftovers=() _arg_help=off _arg_working_dir= _arg_debug=off +_arg_verbose=off print_help () { @@ -28,6 +29,7 @@ print_help () printf "\t%s\n" "... 
: command-options" printf "\t%s\n" "-h,--help: Prints help" printf "\t%s\n" "-w,--working-dir: Where to look for namespaces or images, default: current directory" + printf "\t%s\n" "-v,--verbose: More insight at the cost of noisier output, default: off" } # THE PARSING ITSELF @@ -56,11 +58,16 @@ do _arg_working_dir="$_val" _args_opt_global_opt+=("${_key%%=*}" "$_arg_working_dir") ;; - --no-debug|--debug) + -d*|--no-debug|--debug) _arg_debug="on" _args_opt_global_opt+=("${_key%%=*}") test "${1:0:5}" = "--no-" && _arg_debug="off" ;; + -v*|--no-verbose|--verbose) + _arg_verbose="on" + _args_opt_global_opt+=("${_key%%=*}") + test "${1:0:5}" = "--no-" && _arg_verbose="off" + ;; *) _positionals+=("$1") ;; diff --git a/lib/argbash/push.sh b/lib/cmd/argbash/push.sh similarity index 100% rename from lib/argbash/push.sh rename to lib/cmd/argbash/push.sh diff --git a/lib/argbash/update.sh b/lib/cmd/argbash/update.sh similarity index 100% rename from lib/argbash/update.sh rename to lib/cmd/argbash/update.sh diff --git a/lib/cmd/build.sh b/lib/cmd/build.sh index b256c4ea..579ae203 100644 --- a/lib/cmd/build.sh +++ b/lib/cmd/build.sh @@ -1,82 +1,107 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. 
-_required_binaries=" bzip2 grep id wget" +_required_binaries=" bzip2 grep id jq wget" # shellcheck disable=SC2154 [[ "${_arg_skip_gpg_check}" != "on" ]] && _required_binaries+=" gpg" -[[ $(command -v sha512sum) ]] && _required_binaries+=" sha512sum" || _required_binaries+=" shasum" +[[ -n "$(command -v sha512sum)" ]] && _required_binaries+=" sha512sum" || _required_binaries+=" shasum" -# Populate _build_order and _build_order_builder by checking image dependencies +# Populate _build_order_images and _build_order_builders by checking image dependencies for given image target_ids # # Arguments: -# -# 1: images - fully qualified ids, space separated +# n: target_ids - fully qualified ids function generate_build_order() { - local images image_id builder_id excluded_image - images="$1" - # generate image build order - _required_builder="" - _required_engines="" - for image_id in ${images}; do + local target_ids image_id builder_id engine_id excluded_image + target_ids=( "$@" ) + _build_order_images=() + _required_images=() + _required_builders=() + _required_engines=() + # add builder for interactive dep graph + if [[ "${#_required_builders_interactive[@]}" -gt 0 ]]; then + for builder_id in "${!_required_builders_interactive[@]}"; do + _required_builders["${builder_id}"]="${_required_builders_interactive[${builder_id}]}" + done + fi + # add engine for interactive dep graph + if [[ "${#_required_engines_interactive[@]}" -gt 0 ]]; then + for engine_id in "${!_required_engines_interactive[@]}"; do + _required_engines["${engine_id}"]="${_required_engines_interactive[${engine_id}]}" + done + fi + for image_id in "${target_ids[@]}"; do check_image_dependencies "${image_id}" - if [ -z "$_build_order" ]; then - _build_order="${image_id}" - else - ! 
string_has_word "${_build_order}" "${image_id}" && _build_order+=" ${image_id}" + if [[ -z "${_required_images[$image_id]+_}" ]]; then + expand_image_id "${image_id}" "${_IMAGE_PATH}" + # shellcheck disable=SC2154 + _required_images["${image_id}"]="${__expand_image_id}" + _build_order_images+=( "${image_id}" ) fi done # generate builder build order - _build_order_builder="" - _required_cores="" - for builder_id in ${_required_builder}; do + _build_order_builders=() + _required_cores=() + for builder_id in "${!_required_builders[@]}"; do check_builder_dependencies "${builder_id}" - if [ -z "$_build_order_builder" ]; then - _build_order_builder="${builder_id}" - else - ! string_has_word "${_build_order_builder}" "${builder_id}" && _build_order_builder+=" ${builder_id}" + if ! is_in_array "${builder_id}" "${_build_order_builders[@]}"; then + expand_image_id "${builder_id}" "${_BUILDER_PATH}" + _required_builders["${builder_id}"]="${__expand_image_id}" + _build_order_builders+=( "${builder_id}" ) fi done # shellcheck disable=SC2154 for excluded_image in "${_arg_exclude[@]}";do - _build_order="${_build_order/${excluded_image}/}" + if [[ -n "${_required_images[${excluded_image}]+_}" ]]; then + unset _required_images["${excluded_image}"] + rm_array_value "${excluded_image}" "${_build_order_images[@]}" + _build_order_images=( "${__rm_array_value[@]}" ) + fi done - read -r _build_order <<< "${_build_order}" } -# Check image dependencies and populate _build_order, _required_builder and _required_engines. Recursive. +# Check image dependencies and populate _required_images, _required_builders and _required_engines. Recursive. 
# # Arguments: -# # 1: image_id # 2: previous_image_id function check_image_dependencies() { - local image_id previous_image + local image_id previous_image current_image_path image_id="$1" previous_image="$2" - expand_image_id "${image_id}" "${_IMAGE_PATH}" - if [ "${image_id}" != "scratch" ]; then + if [[ "${image_id}" != 'scratch' ]]; then + expand_image_id "${image_id}" "${_IMAGE_PATH}" + current_image_path="${__expand_image_id}" # shellcheck disable=SC2154 - source_image_conf "${__expand_image_id}" + source_image_conf "${current_image_path}" # collect required engines - ! string_has_word "${_required_engines}" "${BUILD_ENGINE}" && _required_engines+=" ${BUILD_ENGINE}" + [[ -z "${_required_engines[${BUILD_ENGINE}]+_}" ]] && _required_engines["${BUILD_ENGINE}"]="${BUILD_ENGINE}" # collect required build containers if [[ -n "${BUILDER}" ]];then - ! string_has_word "${_required_builder}" "${BUILDER}" && _required_builder+=" ${BUILDER}" + if [[ -z "${_required_builders[${BUILDER}]+_}" ]]; then + expand_image_id "${BUILDER}" "${_BUILDER_PATH}" + _required_builders["${BUILDER}"]="${__expand_image_id}" + fi else # add default build container of current namespace - ! string_has_word "${_required_builder}" "${DEFAULT_BUILDER}" && _required_builder+=" ${DEFAULT_BUILDER}" + if [[ -z "${_required_builders[${DEFAULT_BUILDER}]+_}" ]]; then + expand_image_id "${DEFAULT_BUILDER}" "${_BUILDER_PATH}" + _required_builders["${DEFAULT_BUILDER}"]="${__expand_image_id}" + fi fi if [[ -n "${IMAGE_PARENT}" ]]; then # skip further checking if already processed - if ! 
string_has_word "${_build_order}" "${image_id}"; then + if [[ -z "${_required_images[${image_id}]+_}" ]]; then # check parent image dependencies check_image_dependencies "${IMAGE_PARENT}" "${image_id}" # finally add the image - [[ "${previous_image}" != "" ]] && _build_order+=" ${image_id}" + if [[ -n "${previous_image}" ]]; then + _required_images["${image_id}"]="${current_image_path}" + _build_order_images+=( "${image_id}" ) + fi fi fi fi @@ -95,57 +120,48 @@ function check_builder_dependencies() { expand_image_id "${builder_id}" "${_BUILDER_PATH}" source_image_conf "${__expand_image_id}" # is a stage3 defined for this builder? - [[ -n "${STAGE3_BASE}" ]] && ! string_has_word "${_required_cores}" "${STAGE3_BASE}" \ - && _required_cores+=" ${STAGE3_BASE}" + [[ -n "${STAGE3_BASE}" ]] && [[ -z "${_required_cores[${STAGE3_BASE}]+_}" ]] \ + && _required_cores["${STAGE3_BASE}"]="${STAGE3_BASE}" # skip further checking if already processed - if ! string_has_word "${_build_order_builder}" "${builder_id}"; then + if ! 
string_has_word "${_build_order_builders[*]}" "${builder_id}"; then # check parent if this is not a stage3 builder [[ -z "${STAGE3_BASE}" ]] && check_builder_dependencies "${BUILDER}" "${builder_id}" # finally add the builder - [[ -n "${previous_builder_id}" ]] && _build_order_builder+=" ${builder_id}" + if [[ -n "${previous_builder_id}" ]]; then + expand_image_id "${builder_id}" "${_BUILDER_PATH}" + _required_builders["${builder_id}"]="${__expand_image_id}" + _build_order_builders+=( "${builder_id}" ) + fi fi } -function main() { - local target_id build_type engine_id engines builder_id builders image_id images bob_var - - cd "${_NAMESPACE_DIR}" || die "Failed to change dir to ${_NAMESPACE_DIR}" +# Arguments: +# 1: target_id +# 2: image_path +# 3: image_type +function run_interactive_builder() { + local target_id image_path + target_id="$1" + image_path="$2" + image_type="${3:-${_IMAGE_PATH}}" - # shellcheck disable=SC2154 - [[ "${_arg_verbose_build}" == 'off' ]] && BOB_EMERGE_DEFAULT_OPTS="${BOB_EMERGE_DEFAULT_OPTS} --quiet-build" + add_status_value "${target_id}" - # --interactive build - # shellcheck disable=SC2154 - if [[ "${_arg_interactive}" == 'on' ]]; then # shellcheck disable=SC2034 BOB_IS_INTERACTIVE='true' - # shellcheck disable=SC2154 - target_id="${_arg_target_id}" - [[ "${target_id}" == "*" ]] && die "--interactive does not support wildcards, only fully qualified ids." - if [[ "${target_id}" != *"/"* ]]; then - if [[ -n "${_NAMESPACE_DEFAULT}" ]]; then - target_id="${_NAMESPACE_DEFAULT}/${target_id}" - else - die "--interactive expects an image, but only got a namespace." - fi - fi - build_type="${_IMAGE_PATH}" - expand_image_id "${target_id}" "${build_type}" - if [[ ! -d "${__expand_image_id}" ]]; then - expand_image_id "${target_id}" "${_BUILDER_PATH}" - [[ ! 
-d "${__expand_image_id}" ]] && die "Couldn't find image or builder ${target_id}" - build_type="${_BUILDER_PATH}" - fi - source_image_conf "${__expand_image_id}" + + source_image_conf "${image_path}" unset _use_parent_builder_mounts + # shellcheck disable=SC2034 [[ "${PARENT_BUILDER_MOUNTS}" == 'true' ]] && _use_parent_builder_mounts='true' - - get_build_container "${target_id}" "${build_type}" + get_build_container "${target_id}" "${image_type}" [[ $? -eq 1 ]] && die "Error while executing get_build_container(): ${builder_id}" + # shellcheck disable=SC2154 builder_id="${__get_build_container}" image_exists "${builder_id}" || die "Couldn't find image ${builder_id}" + # shellcheck disable=SC2034 BOB_CURRENT_TARGET="${target_id}" # pass variables starting with BOB_ to build container as ENV @@ -153,22 +169,22 @@ function main() { _container_env+=("${bob_var}=${!bob_var}") done - generate_dockerfile "${__expand_image_id}" + generate_dockerfile "${image_path}" - get_absolute_path "${__expand_image_id}" _container_mounts=( - "${_KUBLER_DIR}/tmp/distfiles:/distfiles" - "${_KUBLER_DIR}/tmp/packages:/packages" - "${_KUBLER_DIR}/tmp/oci-registry:/oci-registry" - "${__get_absolute_path}:/config" + "${KUBLER_DISTFILES_DIR}:/distfiles" + "${KUBLER_PACKAGES_DIR}:/packages" + "${image_path}:/config" ) [[ ${#BUILDER_MOUNTS[@]} -gt 0 ]] && _container_mounts+=("${BUILDER_MOUNTS[@]}") [[ ${#BUILDER_DOCKER_ARGS[@]} -gt 0 ]] && _container_args+=("${BUILDER_DOCKER_ARGS[@]}") + # shellcheck disable=SC2034 _container_mount_portage='true' - _container_cmd=('/bin/bash') + # shellcheck disable=SC2034 + _container_cmd=( '/bin/bash' ) - msg "using: ${BUILD_ENGINE} / builder: ${builder_id}" - msg "\\nRunning interactive build container with ${_NAMESPACE_DIR}/${__expand_image_id} mounted as /config" + msg_info "using: ${BUILD_ENGINE} / builder: ${builder_id}" + msg "\\nRunning interactive build container with ${image_path} mounted as /config" msg "Artifacts from previous builds: 
/backup-rootfs\\n" msg "You may run any helper function available in your image's build.sh, like update_use, etc." msg "Once you are finished tinkering, history | cut -c 8- may prove useful ;)\\n" @@ -176,8 +192,86 @@ function main() { msg " $ kubler-build-root \\n\\nNote: Starting a build twice in the same container is not recommended\\n" msg "Search packages: eix / Check use flags: emerge -pv \\n" - run_image "${builder_id}" "${builder_id}" 'true' - exit $? + run_image "${builder_id}:${IMAGE_TAG}" "${builder_id}" 'true' '' 'false' +} + +function main() { + local target_id target_path build_type engine_id builder_id image_id image_path bob_var init_msg + + cd "${_NAMESPACE_DIR}" || die "Failed to change dir to ${_NAMESPACE_DIR}" + + add_status_value 'init' + init_msg='generate build graph' + + # -i and -n equals -I + # shellcheck disable=SC2154 + [[ "${_arg_interactive}" == 'on' && "${_arg_no_deps}" == 'on' ]] && _arg_interactive_no_deps='on' + + # shellcheck disable=SC2154 + if [[ "${_arg_verbose_build}" == 'off' ]]; then + BOB_EMERGE_DEFAULT_OPTS="${BOB_EMERGE_DEFAULT_OPTS} --quiet-build" + else + # shellcheck disable=SC2034 + _arg_verbose='on' + fi + + # shellcheck disable=SC2154 + [[ "${_arg_clear_everything}" == 'on' ]] && _arg_clear_build_container='on' + # shellcheck disable=SC2034 + [[ "${_arg_clear_build_container}" == 'on' ]] && _arg_force_full_image_build='on' + + # clone kubler-images repo if non-existing + if [[ "${KUBLER_DISABLE_KUBLER_NS}" != 'true' ]] && ! 
is_git_dir "${_KUBLER_NAMESPACE_DIR}"/kubler; then + add_status_value 'kubler-images' + clone_or_update_git_repo "${_KUBLER_NS_GIT_URL}" "${_KUBLER_NAMESPACE_DIR}" 'kubler' + add_status_value + fi + + # prepare a --interactive build + # shellcheck disable=SC2154 + [[ "${_arg_interactive_no_deps}" == 'on' ]] && _arg_interactive='on' + if [[ "${_arg_interactive}" == 'on' ]]; then + target_id="${_arg_target_id}" + + if [[ "${target_id}" != *'/'* || "${target_id}" == *'/' ]]; then + if [[ -n "${_NAMESPACE_DEFAULT}" ]]; then + target_id="${_NAMESPACE_DEFAULT}/${target_id}" + else + die "--interactive expects an image id, but only got a namespace." + fi + fi + build_type="${_IMAGE_PATH}" + expand_image_id "${target_id}" "${build_type}" + if [[ $? -eq 3 ]]; then + # builder image_id is allowed for interactive runs, so let's retry with that + expand_image_id "${target_id}" "${_BUILDER_PATH}" || die "Couldn't find image or builder ${target_id}" + build_type="${_BUILDER_PATH}" + fi + target_path="${__expand_image_id}" + source_image_conf "${target_path}" + validate_image "${target_id}" "${target_path}" + + if [[ "${_arg_interactive_no_deps}" == 'on' || "${build_type}" != "${_IMAGE_PATH}" ]]; then + run_interactive_builder "${target_id}" "${target_path}" "${build_type}" + trap ' ' EXIT + exit $? 
+ fi + declare -A _required_builders_interactive _required_engines_interactive + + # modify target_id args and required builders/engines for dep graph + _required_engines_interactive["${BUILD_ENGINE}"]="${BUILD_ENGINE}" + if [[ -n "${BUILDER}" ]]; then + expand_image_id "${BUILDER}" "${_BUILDER_PATH}" + _required_builders_interactive["${BUILDER}"]="${__expand_image_id}" + _arg_target_id=() + elif [[ -n "${IMAGE_PARENT}" && "${IMAGE_PARENT}" != 'scratch' ]]; then + _arg_target_id=( "${IMAGE_PARENT}" ) + else + expand_image_id "${DEFAULT_BUILDER}" "${_BUILDER_PATH}" + _required_builders_interactive["${DEFAULT_BUILDER}"]="${__expand_image_id}" + _arg_target_id=() + fi + init_msg+=" for interactive build of ${target_id}" fi # --no-deps build @@ -185,56 +279,64 @@ function main() { if [[ "${_arg_no_deps}" == 'on' ]]; then for image_id in "${_arg_target_id[@]}"; do [[ "${image_id}" == "*" ]] && die "--no-deps does not support wildcards, only fully qualified ids." - if [[ "${image_id}" != *"/"* ]]; then + if [[ "${image_id}" != *"/"* || "${image_id}" == *'/' ]]; then if [[ -n "${_NAMESPACE_DEFAULT}" ]]; then image_id="${_NAMESPACE_DEFAULT}/${image_id}" + [[ "${image_id}" == *'/' ]] && image_id="${image_id::-1}" else die "--no-deps expects a fully qualified image_id, but only got namespace \"${image_id}\"" fi fi expand_image_id "${image_id}" "${_IMAGE_PATH}" source_image_conf "${__expand_image_id}" - validate_image "${image_id}" "${_IMAGE_PATH}" - build_image_no_deps "${image_id}" + validate_image "${image_id}" "${__expand_image_id}" + build_image_no_deps "${image_id}" "${__expand_image_id}" done + trap ' ' EXIT exit $? 
fi - msg "*** generate build order" + msg_info "${init_msg}" + + declare -a _build_order_images _build_order_builders + declare -A _required_images _required_builders _required_cores _required_engines expand_requested_target_ids "${_arg_target_id[@]}" # shellcheck disable=SC2154 - generate_build_order "${__expand_requested_target_ids}" - msgf "required engines:" "${_required_engines:1}" - msgf "required stage3:" "${_required_cores:1}" - msgf "required builders:" "${_build_order_builder}" - msgf "build sequence:" "${_build_order}" - [[ -n ${_arg_exclude} ]] && msgf "excluded:" "${_arg_exclude[@]}" - - IFS=" " read -r -a engines <<< "${_required_engines}" - for engine_id in "${engines[@]}"; do - # shellcheck source=lib/engine/docker.sh - source "${_LIB_DIR}/engine/${engine_id}.sh" - validate_engine + generate_build_order "${__expand_requested_target_ids[@]}" + + msgf "required engines:" "${!_required_engines[*]}" + msgf "required stage3:" "${!_required_cores[*]}" + msgf "required builders:" "${_build_order_builders[*]}" + [[ "${#_build_order_images[@]}" -gt 0 ]] && msgf "build sequence:" "${_build_order_images[*]}" + [[ -n ${_arg_exclude} ]] && msgf "excluded:" "${_arg_exclude[*]}" + + for engine_id in "${!_required_engines[@]}"; do + source_build_engine "${engine_id}" + validate_engine done - msg "*** gogo!" + msg_ok 'done.' 
- IFS=" " read -r -a builders <<< "${_build_order_builder}" - for builder_id in "${builders[@]}"; do - expand_image_id "${builder_id}" "${_BUILDER_PATH}" - source_image_conf "${__expand_image_id}" - validate_image "${builder_id}" "${_BUILDER_PATH}" - build_builder "${builder_id}" + for builder_id in "${_build_order_builders[@]}"; do + image_path="${_required_builders[${builder_id}]}" + source_image_conf "${image_path}" + validate_image "${builder_id}" "${image_path}" + build_builder "${builder_id}" "${image_path}" + # shellcheck disable=SC2154 + [[ "${_arg_verbose}" == 'off' ]] && file_exists_and_truncate "${_KUBLER_LOG_DIR}/${_arg_command}.log" done - IFS=" " read -r -a images <<< "${_build_order}" - for image_id in "${images[@]}"; do - expand_image_id "${image_id}" "${_IMAGE_PATH}" - source_image_conf "${__expand_image_id}" - validate_image "${image_id}" "${_IMAGE_PATH}" - build_image "${image_id}" + for image_id in "${_build_order_images[@]}"; do + image_path="${_required_images[${image_id}]}" + source_image_conf "${image_path}" + validate_image "${image_id}" "${image_path}" + build_image "${image_id}" "${image_path}" + [[ "${_arg_verbose}" == 'off' ]] && file_exists_and_truncate "${_KUBLER_LOG_DIR}/${_arg_command}.log" done + + # shellcheck disable=SC2154 + [[ "${_arg_interactive}" == 'on' ]] && run_interactive_builder "${target_id}" "${target_path}" } main "$@" diff --git a/lib/cmd/clean.sh b/lib/cmd/clean.sh index 2b26016c..144dde3b 100644 --- a/lib/cmd/clean.sh +++ b/lib/cmd/clean.sh @@ -1,18 +1,94 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. -function main() { +# Arguments: +# n: namespace_dirs - absolute paths +function rm_build_artifacts() { local namespace_dirs + namespace_dirs=( "$@" ) + find -L "${namespace_dirs[@]}" \( -name rootfs.tar -o -name Dockerfile -o -name PACKAGES.md \) -delete + return $? 
+} + +# Delete all existing Docker images for given namespace_id +# +# Arguments: +# 1: namespace_id +function rm_docker_namespace_images { + local namespace_id docker_out + namespace_id="$1" + [[ "${namespace_id}" != */ ]] && namespace_id="${namespace_id}/" + namespace_id="${namespace_id}*" + docker_out="$("${DOCKER}" images "${namespace_id}" -q)" + # shellcheck disable=SC2086 + [[ -n "${docker_out}" ]] && "${DOCKER}" rmi -f ${docker_out} + return $? +} + +function main() { + local namespace_dirs ns_dir ns_id namespace_dirs=( "${_NAMESPACE_DIR}" ) [[ "${_NAMESPACE_TYPE}" != 'local' ]] && namespace_dirs+=( "${_KUBLER_NAMESPACE_DIR}" ) - msg "--> remove build artifacts" - msg "rootfs.tar files" - find -L "${namespace_dirs[@]}" -name rootfs.tar -delete - msg "generated Dockerfiles" - find -L "${namespace_dirs[@]}" -name Dockerfile -delete - msg "PACKAGES.md files" - find -L "${namespace_dirs[@]}" -name PACKAGES.md -delete + + # shellcheck disable=SC2154 + [[ "${_arg_nuke_from_orbit}" == 'on' ]] && \ + _arg_build_artifacts='on' && _arg_prune_dangling_images='on' && _arg_all_images='on' && _arg_build_artifacts='on' + + # use -b as default if nothing else was passed + [[ "${_arg_build_artifacts}" == 'off' && "${_arg_prune_dangling_images}" == 'off' \ + && "${_arg_all_images}" == 'off' && "${#_arg_image_ns[@]}" -eq 0 ]] && _arg_build_artifacts='on' + + if [[ "${_arg_build_artifacts}" == 'on' ]]; then + add_status_value "build artifacts" + _status_msg="Delete rootfs.tar, generated Dockerfile and PACKAGES.md files" + pwrap rm_build_artifacts "${namespace_dirs[@]}" || die + msg_ok 'done.' + fi + + if [[ "${_arg_prune_dangling_images}" == 'on' ]]; then + add_status_value "dangling images" + source_build_engine 'docker' + _status_msg="exec docker image prune" + pwrap "${DOCKER}" image prune -f || die + msg_ok 'done.' 
+ fi + + if [[ "${_arg_all_images}" == 'on' ]]; then + # -I overrides -i + _arg_image_ns=() + for ns_dir in "${namespace_dirs[@]}"; do + if [[ "${_NAMESPACE_TYPE}" != 'single' || "${ns_dir}" == "${_KUBLER_NAMESPACE_DIR}" ]]; then + pushd "${ns_dir}" 1> /dev/null || die + for ns_id in */; do + _arg_image_ns+=( "${ns_id}" ) + done + popd 1> /dev/null || die + else + _arg_image_ns+=( "$(basename -- "${ns_dir}")" ) + fi + done + fi + + if [[ "${#_arg_image_ns[@]}" -gt 0 ]]; then + source_build_engine 'docker' + add_status_value "built images" + for ns_id in "${_arg_image_ns[@]}"; do + ns_id="${ns_id%/}" + _status_msg="exec docker rmi -f ${ns_id}/*" + pwrap rm_docker_namespace_images "${ns_id}" + done + if [[ "${_arg_nuke_from_orbit}" == 'on' ]]; then + #stop_container "${_PORTAGE_CONTAINER}" + # shellcheck disable=SC2034 + _status_msg="exec docker container prune" + pwrap "${DOCKER}" container prune -f || die + # shellcheck disable=SC2034 + _status_msg="exec docker rmi -f ${_STAGE3_NAMESPACE}/*" + pwrap rm_docker_namespace_images "${_STAGE3_NAMESPACE}" + fi + msg_ok 'done.' + fi } main "$@" diff --git a/lib/cmd/dep-graph.sh b/lib/cmd/dep-graph.sh new file mode 100644 index 00000000..7f05000b --- /dev/null +++ b/lib/cmd/dep-graph.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# Copyright (c) 2014-2019, Erik Dannenberg +# All rights reserved. + +# Check image dependencies and populate global var _dep_graph. Recursive. +# +# Arguments: +# +# 1: image_id +# 2: previous_image_id +function check_image_dependencies() { + local image_id previous_image + image_id="$1" + previous_image="$2" + if [[ "${image_id}" != 'scratch' ]]; then + expand_image_id "${image_id}" "${_IMAGE_PATH}" + # shellcheck disable=SC2154 + source_image_conf "${__expand_image_id}" + + if [[ -n "${IMAGE_PARENT}" ]]; then + # skip further checking if already processed + if ! 
is_in_array "${image_id}" "${_dep_graph[@]}"; then + # check parent image dependencies + check_image_dependencies "${IMAGE_PARENT}" "${image_id}" + # finally add the image + [[ "${previous_image}" != "" ]] && _dep_graph+=( "${image_id}" ) + fi + fi + fi +} + +function main() { + local dotstring output_file dot_exec + # shellcheck disable=SC2154 + output_file="${_arg_output_file}" + dot_exec=() + + # shellcheck disable=SC2154 + [[ -z "${output_file}" && "${_arg_as_raw_dot}" != 'on' && "${_arg_as_ascii}" != 'on' && "${_arg_as_boxart}" != 'on' ]] \ + && die "--output-file is required for png output" + + # shellcheck disable=SC2154 + expand_requested_target_ids "${_arg_target_id[@]}" + + declare -a _dep_graph + + # shellcheck disable=SC2154 + for image_id in "${__expand_requested_target_ids[@]}"; do + check_image_dependencies "${image_id}" + ! is_in_array "${image_id}" "${_dep_graph[@]}" && _dep_graph+=( "${image_id}" ) + done + + dotstring="strict digraph imagedeps {\n rankdir=LR;" + + for image_id in "${_dep_graph[@]}"; do + node_options='' + expand_image_id "${image_id}" + source_image_conf "${__expand_image_id}" + if [[ -n "${BUILDER}" ]]; then + node_options=" [label=\"${BUILDER}\"]" + elif [[ "${IMAGE_PARENT}" == 'scratch' ]]; then + node_options=" [label=\"${DEFAULT_BUILDER}\"]" + fi + dotstring="${dotstring}\n \"${IMAGE_PARENT}\" -> \"${image_id}\"${node_options};" + done + + [[ "${_arg_as_raw_dot}" != 'on' ]] && ! 
image_exists "${KUBLER_DEPGRAPH_IMAGE}" && {\ + msg_error "docker image ${KUBLER_DEPGRAPH_IMAGE} is required locally, to resolve this:"; msg_info_sub; + msg_info_sub "$ kubler build ${KUBLER_DEPGRAPH_IMAGE}"; + msg_info_sub; msg_info_sub "or"; msg_info_sub; + msg_info_sub "$ docker pull ${KUBLER_DEPGRAPH_IMAGE}"; msg_info_sub; + msg_info_sub "or use --as-raw-dot/-r which doesn't require a docker image"; msg_info_sub; + die; } + + dotstring="${dotstring}\n}" + + dot_exec+=( 'graph-easy' '--from' 'dot' ) + if [[ "${_arg_as_ascii}" == 'on' ]]; then + dot_exec+=( '--as_ascii' ) + elif [[ "${_arg_as_boxart}" == 'on' ]]; then + dot_exec+=( '--as_boxart' ) + else + dot_exec=( 'dot' '-Tpng' ) + fi + + if [[ "${_arg_as_raw_dot}" == 'on' && -n "${output_file}" ]]; then + echo -e "${dotstring}" > "${output_file}" + elif [[ "${_arg_as_raw_dot}" == 'on' ]]; then + echo -e "${dotstring}" + elif [[ -n "${output_file}" ]]; then + echo -e "${dotstring}" | "${DOCKER}" run --rm -i "${KUBLER_DEPGRAPH_IMAGE}" "${dot_exec[@]}" > "${output_file}" + else + echo -e "${dotstring}" | "${DOCKER}" run --rm -i "${KUBLER_DEPGRAPH_IMAGE}" "${dot_exec[@]}" + fi +} + +main "$@" diff --git a/lib/cmd/new.sh b/lib/cmd/new.sh index b3f6f76d..6c39a9da 100644 --- a/lib/cmd/new.sh +++ b/lib/cmd/new.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. # Adds given var_name and it's replacement to global assoc. array _template_values @@ -67,49 +67,56 @@ function add_namespace() { [[ "${AUTHOR}" =~ $regex ]] && def_author="${BASH_REMATCH[1]}" && def_mail="${BASH_REMATCH[2]}" [[ -n "${BUILD_ENGINE}" ]] && def_engine="${BUILD_ENGINE}" - msg '\n to accept default value\n' + msg_info_sub + msg_info_sub ' to accept default value' + msg_info_sub if [[ "${_NAMESPACE_TYPE}" == 'none' ]]; then - msg "--> What type of namespace? To allow multiple namespaces choose 'multi', else 'single'. 
- The only upshot of 'single' mode is saving one directory level, the downside is loss of cross-namespace access." + msg_info_sub "Namespace type? Either 'multi' or 'single' if you don't plan on adding further namespaces in the future." ask 'Type' 'multi' # shellcheck disable=SC2154 ns_type="${__ask}" add_template_filter_var '_tmpl_ns_type' "${ns_type}" - [[ "${ns_type}" != 'single' && "${ns_type}" != 'multi' ]] && die "\\nUnknown type: \"${ns_type}\"" + [[ "${ns_type}" != 'single' && "${ns_type}" != 'multi' ]] && die "Unknown type: \"${ns_type}\"" if [[ "${_NAMESPACE_TYPE}" == 'none' && "${ns_type}" == 'multi' ]]; then - msg '\n--> What dir name should be used for the top level dir holding the new namespace(s)?' + msg_info_sub + msg_info_sub "Top level directory name for new namespace '${ns_name}'? The directory is created at ${_NAMESPACE_DIR}/" ask 'Namespaces Dir' 'kubler-images' ns_dir="${_NAMESPACE_DIR}/${__ask}" - [[ -e "${ns_dir}" ]] && die "${ns_dir} already exists, aborting. If you intended to create the new namespace at this location use: \\n + [[ -e "${ns_dir}" ]] && die "Directory ${ns_dir} already exists, aborting. If you intended to create the new namespace at this location use: \\n ${_KUBLER_BIN} --working-dir=${ns_dir} new namespace ${ns_name}" real_ns_dir="${ns_dir}/${ns_name}" fi - msg "--> Initial image tag, a.k.a. version?" + msg_info_sub + msg_info "Initial image tag, a.k.a. version?" ask 'Image Tag' "${_TODAY}" add_template_filter_var '_tmpl_image_tag' "${__ask}" + msg_info_sub else - msg "Namespace Type: ${_NAMESPACE_TYPE}" + msg_info_sub + msg_warn "Namespace Type: ${_NAMESPACE_TYPE}" fi - msg "\\nNew namespace location: ${real_ns_dir}" + msg_warn "New namespace location: ${real_ns_dir}" + msg_info_sub - msg '\n--> Who maintains the new namespace?' + msg_info 'Who maintains the new namespace?' 
ask 'Name' "${def_author}" add_template_filter_var '_tmpl_author' "${__ask}" ask 'EMail' "${def_mail}" add_template_filter_var '_tmpl_author_email' "${__ask}" + msg_info_sub - msg '--> What type of images would you like to build?' + msg_info 'Used build engine?' ask 'Engine' "${def_engine}" ns_engine="${__ask}" add_template_filter_var '_tmpl_engine' "${ns_engine}" - [[ ! -f "${_LIB_DIR}/engine/${ns_engine}.sh" ]] && die "\\nUnknown engine: ${ns_engine}" + [[ ! -f "${_LIB_DIR}/engine/${ns_engine}.sh" ]] && die "Unknown engine: ${ns_engine}" [[ "${_NAMESPACE_TYPE}" == 'none' && "${ns_type}" == 'multi' ]] && mkdir "${ns_dir}" @@ -125,7 +132,7 @@ function add_namespace() { # default multi conf file can also be used for new single namespaces.. default_conf='multi' if [[ -z "${_KUBLER_BIN_HINT}" ]];then - kubler_bin_hint="cd ${ns_dir}\\n ${kubler_bin_hint}" + kubler_bin_hint="cd ${ns_dir}\\n $ ${kubler_bin_hint}" else kubler_bin_hint="${_KUBLER_BIN} --working-dir ${ns_dir}" fi @@ -141,38 +148,18 @@ function add_namespace() { [[ "${_NAMESPACE_TYPE}" == 'none' && "${ns_type}" == 'multi' ]] && \ replace_template_placeholders "${real_ns_dir}" - msg "*** Successfully created \"${ns_name}\" namespace at ${ns_dir} - -Configuration file: ${real_ns_dir}/${_KUBLER_CONF} - -To manage the new namespace with GIT you may want to run: - - git init ${real_ns_dir}" - - msg "\\nTo create images in the new namespace run: - - ${kubler_bin_hint} new image ${ns_name}/ -" -} - -# Arguments -# 1: namespace -# Return value: absolute path of kubler.conf for given namespace -function get_ns_conf() { - __get_ns_conf= - local namespace ns_conf_file - namespace="$1" - - ns_conf_file="${_NAMESPACE_DIR}/" - [[ "${_NAMESPACE_TYPE}" != 'single' ]] && ns_conf_file+="${namespace}/" - ns_conf_file+="${_KUBLER_CONF}" - [ -f "${ns_conf_file}" ] || die "Couldn't find ${ns_conf_file} - -Check spelling of \"${namespace}\" or create a new namespace by running: - - ${_KUBLER_BIN}${_KUBLER_BIN_HINT} new namespace 
${namespace} -" - __get_ns_conf="${ns_conf_file}" + msg_info_sub + msg_ok "Successfully created \"${ns_name}\" namespace at ${ns_dir}" + msg_info_sub + msg_warn "Configuration file: ${real_ns_dir}/${_KUBLER_CONF}" + msg_info_sub + msg_warn "To manage the new namespace with GIT you may want to run:" + msg_info_sub + msg_info_sub "$ git init ${real_ns_dir}" + msg_info_sub + msg_warn "To create images in the new namespace run:" + msg_info_sub + msg_info_sub "$ ${kubler_bin_hint} new image ${ns_name}/" } # Create empty dir for given image and return the absolute path @@ -208,39 +195,56 @@ function init_image_base_dir() { # 1: namespace # 2: image_name function add_image() { - local namespace image_name image_parent image_path + local namespace image_name image_parent image_builder image_path namespace="$1" image_name="$2" - msg '\n to accept default value\n' - - msg '--> Extend an existing image? Fully qualified image id (i.e. kubler/busybox) if yes or scratch' + msg_info_sub + msg_info_sub ' to accept default value' + msg_info_sub + msg_info_sub 'Extend an existing Kubler managed image? Fully qualified image id (i.e. kubler/busybox) or scratch' ask 'Parent Image' 'scratch' image_parent="${__ask}" + image_builder="${DEFAULT_BUILDER}" + if [[ "${image_parent}" == 'scratch' ]]; then + msg_info_sub + msg_info_sub "Which builder should be used? 
Press to use the default builder of namespace ${namespace}" + ask 'Builder Id' "${DEFAULT_BUILDER}" + image_builder="${__ask}" + [[ "${target_id}" != *"/"* ]] && die "${target_id} should have format /" + [[ "${image_builder}" != "${DEFAULT_BUILDER}" ]] && add_template_sed_replace '^#BUILDER=' 'BUILDER=' + elif [[ "${image_parent}" != *"/"* || "${image_parent}" == *"/" ]]; then + die "\"${image_parent}\" should have format /" + fi + init_image_base_dir "${namespace}" "${image_name}" "${_IMAGE_PATH}" image_path="${__init_image_base_dir}" cp -r "${_LIB_DIR}/template/${BUILD_ENGINE}/image" "${image_path}" || die add_template_filter_var '_tmpl_image_parent' "${image_parent}" + add_template_filter_var '_tmpl_image_builder' "${image_builder}" replace_template_placeholders "${image_path}" - msg "*** Successfully created image \"${image_name}\" in namespace \"${namespace}\" at ${image_path}\\n" + msg_info_sub + msg_ok "Successfully created new image at ${image_path}" + msg_info_sub } # Arguments # 1: namespace # 2: builder_name function add_builder() { - local namespace builder_name builder_parent builder_path update_hint + local namespace builder_name builder_parent builder_path is_stage3_builder namespace="$1" builder_name="$2" - msg '\n to accept default value\n' - - msg '--> Extend an existing builder? Fully qualified image id (i.e. kubler/bob) if yes or else stage3' + msg_info_sub + msg_info_sub ' to accept default value' + msg_info_sub + msg_info_sub 'Extend existing Kubler builder image? Fully qualified image id (i.e. 
kubler/bob) or stage3' ask 'Parent Image' 'stage3' builder_parent="${__ask}" @@ -248,10 +252,10 @@ function add_builder() { if [[ "${builder_parent}" == "stage3" ]]; then builder_parent='\${NAMESPACE}/bob' add_template_sed_replace '^BUILDER' '#BUILDER' - update_hint="You should check for latest stage3 files by running:\\n - ${_KUBLER_BIN}${_KUBLER_BIN_HINT} update --no-sync - " + is_stage3_builder='true' else + [[ "${builder_parent}" != *"/"* || "${builder_parent}" == *"/" ]] \ + && die "\"${builder_parent}\" should have format /" add_template_sed_replace '^STAGE3' '#STAGE3' fi @@ -260,10 +264,28 @@ function add_builder() { cp -r "${_LIB_DIR}/template/${BUILD_ENGINE}/builder" "${builder_path}" || die + local build_sh_use build_sh_rm + build_sh_use='build_ext.sh' + build_sh_rm='build_stage3.sh' + if [[ "${is_stage3_builder}" == 'true' ]]; then + build_sh_use='build_stage3.sh' + build_sh_rm='build_ext.sh' + fi + [[ -f "${builder_path}"/"${build_sh_use}" ]] && mv "${builder_path}"/"${build_sh_use}" "${builder_path}"/build.sh + [[ -f "${builder_path}"/"${build_sh_rm}" ]] && rm "${builder_path}"/"${build_sh_rm}" + add_template_filter_var '_tmpl_builder' "${builder_parent}" replace_template_placeholders "${builder_path}" - msg "*** Successfully created \"${builder_name}\" builder at ${builder_path}\\n${update_hint}" + msg_info_sub + msg_ok "Successfully created new builder at ${builder_path}" + msg_info_sub + if [[ -n "${is_stage3_builder}" ]]; then + msg_warn "Configure the STAGE3_BASE in ${builder_path}/build.conf then run:" + msg_info_sub + msg_info_sub "$ ${_KUBLER_BIN}${_KUBLER_BIN_HINT} update" + msg_info_sub + fi } function main() { @@ -284,7 +306,8 @@ function main() { && die "Invalid ${_arg_template_type} name '${target_id}', should be lower case only." 
if [[ "${_arg_template_type}" != 'namespace' ]]; then - [[ "${target_id}" != *"/"* ]] && die "\"${target_id}\" should have format /" + [[ "${target_id}" != *"/"* || "${target_id}" == *"/" ]] \ + && die "\"${target_id}\" should have format /" [[ "${_NAMESPACE_TYPE}" == 'none' ]] \ && die "${_NAMESPACE_DIR} is not a valid Kubler namespace dir" fi @@ -292,9 +315,9 @@ function main() { add_template_filter_var '_tmpl_namespace' "${target_namespace}" if [[ "${_arg_template_type}" == 'image' || "${_arg_template_type}" == 'builder' ]]; then - get_ns_conf "${target_namespace}" - # shellcheck source=dock/kubler/kubler.conf - source "${__get_ns_conf}" + get_ns_include_path "${target_namespace}" + # shellcheck disable=SC2154 + source_namespace_conf "${__get_ns_include_path}" add_template_filter_var '_tmpl_image_name' "${target_image_name}" fi diff --git a/lib/cmd/push.sh b/lib/cmd/push.sh index 66f5ae12..45fc5337 100644 --- a/lib/cmd/push.sh +++ b/lib/cmd/push.sh @@ -1,15 +1,13 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. function main() { local image_id current_ns - cd "${_NAMESPACE_DIR}" || die "Failed to change dir to ${_NAMESPACE_DIR}" # shellcheck disable=SC2154 expand_requested_target_ids "${_arg_target_id[@]}" # shellcheck disable=SC2154 - msg "--> push: ${__expand_requested_target_ids:1}" - for image_id in ${__expand_requested_target_ids}; do + for image_id in "${__expand_requested_target_ids[@]}"; do current_ns="${image_id%%/*}" expand_image_id "${image_id}" "${_IMAGE_PATH}" # shellcheck disable=SC2154 @@ -17,7 +15,8 @@ function main() { source_push_conf "${image_id}" if ! image_exists "${image_id}"; then - msg "--> skipping ${image_id}:${IMAGE_TAG}, image is not build yet" + add_status_value "${image_id}" + msg_warn "skipped, image is not build yet." 
continue fi diff --git a/lib/cmd/update.sh b/lib/cmd/update.sh index 152f1a55..bb114ee1 100644 --- a/lib/cmd/update.sh +++ b/lib/cmd/update.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. # Compare given local and remote stage3 date, returns 0 if remote is newer or 1 if not @@ -29,12 +29,16 @@ function update_builders() { current_ns="$1" builder_path="$2" update_count=0 - if [[ -d "${builder_path}" ]]; then + add_status_value 'stage3' + add_status_value "${current_ns}" 'true' + if [[ -d "${builder_path}" ]] && ! dir_is_empty "${builder_path}"; then cd "${builder_path}" || die "Failed to change dir to ${builder_path}" for current_builder in */; do update_status= - cd "${_NAMESPACE_DIR}" || die "Failed to change dir to ${_NAMESPACE_DIR}" - source_image_conf "${current_ns}/${_BUILDER_PATH}/${current_builder}" + add_status_value 'stage3' + add_status_value "${current_ns}" 'true' + add_status_value "${current_builder::-1}" 'true' + source_image_conf "${current_ns}/${_BUILDER_PATH}${current_builder}" if [[ -n "${STAGE3_BASE}" ]]; then fetch_stage3_archive_name || die "Couldn't find a stage3 file for ${ARCH_URL}" get_stage3_archive_regex "${STAGE3_BASE}" @@ -52,51 +56,58 @@ function update_builders() { update_status="up-to-date ${STAGE3_DATE} - ${STAGE3_BASE}" fi else - update_status="error: couldn't parse remote STAGE3 DATE from ${ARCH_URL}" + die "Failed to parse remote STAGE3 DATE from ${ARCH_URL}" fi else update_status="n/a - extends ${BUILDER}" fi - msgf "${current_builder}" "${update_status}" + msg_info "${update_status}" done else - msg "--> no build containers" + msg_info "no build containers" fi __update_builders=${update_count} } # Update STAGE3_DATE in build.conf for all builders in all namespaces function update_stage3_date() { - local current_ns builder_path + local ns_paths current_ns_dir builder_path + ns_paths=( "${_KUBLER_NAMESPACE_DIR}" ) + [[ 
"${_NAMESPACE_DIR}" != "${_KUBLER_NAMESPACE_DIR}" && "${_NAMESPACE_TYPE}" != 'single' ]] \ + && ns_paths+=( "${_NAMESPACE_DIR}" ) update_count=0 - cd "${_NAMESPACE_DIR}" || die "Failed to change dir to ${_NAMESPACE_DIR}" - if [[ "${_NAMESPACE_TYPE}" == 'single' ]]; then - update_builders "${current_ns}" "${_NAMESPACE_DIR}/${_BUILDER_PATH}" - else - for current_ns in */; do - msg "${current_ns}" - builder_path="${_NAMESPACE_DIR}/${current_ns}${_BUILDER_PATH}" - update_count=$((update_count+=__update_builders)) + for current_ns_dir in "${ns_paths[@]}"; do + pushd "${current_ns_dir}" 1> /dev/null || die + local ns + for ns in "${current_ns_dir}"/*/; do + current_ns="$(basename -- "${ns}")" + add_status_value 'stage3' + add_status_value "${current_ns}" 'true' + builder_path="${ns}/${_BUILDER_PATH}" update_builders "${current_ns}" "${builder_path}" + update_count=$((update_count + __update_builders)) done + popd 1> /dev/null || die + done + + if [[ "${_NAMESPACE_TYPE}" == 'single' ]]; then + current_ns="$(basename -- "${_NAMESPACE_DIR}")" + add_status_value 'stage3' + add_status_value "${current_ns}" 'true' + update_builders "${current_ns}" "${_NAMESPACE_DIR}/${_BUILDER_PATH}" fi - if [[ "${_NAMESPACE_TYPE}" != 'local' ]]; then - msg "kubler" - update_builders 'kubler' "${_KUBLER_NAMESPACE_DIR}/kubler/${_BUILDER_PATH}" - update_count=$((update_count+=__update_builders)) - fi + add_status_value "stage3" if [[ ${update_count} -eq 0 ]]; then - msg '\nAll stage3 dates are up to date.' + msg_ok 'all stage3 dates are up to date.' 
else - msg "\\nFound updates for ${update_count} stage3 file(s), to rebuild run:\\n - ${_KUBLER_BIN}${_KUBLER_BIN_HINT} clean - ${_KUBLER_BIN}${_KUBLER_BIN_HINT} build -C some_namespace\\n" + msg_warn "Found updates for ${update_count} stage3 file(s), to rebuild run:\\n + $ ${_KUBLER_BIN}${_KUBLER_BIN_HINT} clean + $ ${_KUBLER_BIN}${_KUBLER_BIN_HINT} build -C some_namespace\\n" fi } # Arguments: -# -# 1: builder_id - optional, default "kubler/bob-musl" +# 1: builder_id - optional, default "kubler/bob" function update_portage() { local builder_id builder_id="${1:-${DEFAULT_BUILDER}}" @@ -114,18 +125,26 @@ function update_portage() { _container_mount_portage='true' # shellcheck disable=SC2034 _container_cmd=("portage-git-sync") - msg "--> run emerge --sync using ${builder_id}" + msg_info "run emerge --sync using ${builder_id}" run_image "${builder_id}" "portage-sync-worker" } function main() { + # clone kubler-images repo if non-existing and enabled + if [[ "${KUBLER_DISABLE_KUBLER_NS}" != 'true' ]]; then + add_status_value 'kubler-images' + clone_or_update_git_repo "${_KUBLER_NS_GIT_URL}" "${_KUBLER_NAMESPACE_DIR}" 'kubler' + fi + # shellcheck disable=SC2154 - if [[ "${_arg_no_sync}" == 'off' ]]; then - msg "*** sync portage container" + if [[ "${KUBLER_PORTAGE_GIT}" == 'true' ]]; then + add_status_value 'portage' + msg_info "sync container" # shellcheck disable=SC2154 update_portage "${_arg_builder_image}" fi - msg "*** check for stage3 updates" + add_status_value 'stage3' + msg_info "check all namespaces for new releases" update_stage3_date } diff --git a/lib/core.sh b/lib/core.sh index 27ed5d3a..3fd45402 100644 --- a/lib/core.sh +++ b/lib/core.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the @@ -10,7 +10,7 @@ # disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -# following disclaimer in the documentation and/or other materials provided with the distribution. +# following disclaimer in the documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -21,7 +21,16 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -readonly _KUBLER_NAMESPACE_DIR="${_KUBLER_DIR}"/dock +KUBLER_DISABLE_KUBLER_NS="${KUBLER_DISABLE_KUBLER_NS:-false}" + +KUBLER_DOWNLOAD_DIR="${KUBLER_DOWNLOAD_DIR:-${KUBLER_DATA_DIR}/downloads}" +KUBLER_DISTFILES_DIR="${KUBLER_DISTFILES_DIR:-${KUBLER_DATA_DIR}/distfiles}" +KUBLER_PACKAGES_DIR="${KUBLER_PACKAGES_DIR:-${KUBLER_DATA_DIR}/packages}" +KUBLER_DEPGRAPH_IMAGE="${KUBLER_DEPGRAPH_IMAGE:-kubler/graph-easy}" + +readonly _KUBLER_NAMESPACE_DIR="${KUBLER_DATA_DIR}"/namespaces +readonly _KUBLER_LOG_DIR="${KUBLER_DATA_DIR}"/log +readonly _KUBLER_NS_GIT_URL='https://github.com/edannenberg/kubler-images.git' readonly _KUBLER_CONF='kubler.conf' readonly _IMAGE_PATH="images/" readonly _BUILDER_PATH="builder/" @@ -38,56 +47,93 @@ BOB_HOST_GID=$(id -g) # stage3 defaults, override via build container .conf STAGE3_BASE="stage3-amd64-hardened+nomultilib" +_kubler_trap_functions=() +_kubler_internal_abort= + # used as primitive caching mechanism _last_sourced_engine= _last_sourced_image= _last_sourced_push_conf= -# Arguments -# n: message -function msg() { - echo -e "$@" +# shellcheck source=lib/util.sh +source "${_LIB_DIR}"/util.sh || die + +# Helper function that provides parsable data for Kubler's bash completion script. 
+function bc_helper() { + [[ -z "${KUBLER_BC_HELP}" ]] && return + local available_cmds cmd + available_cmds=() + for cmd in "${_LIB_DIR}"/cmd/*.sh "${KUBLER_DATA_DIR}"/cmd/*.sh; do + available_cmds+=("$(basename -- "${cmd%.*}")") + done + + echo "${KUBLER_DATA_DIR}" + echo "${_NAMESPACE_DIR}" + echo "${_NAMESPACE_TYPE}" + echo "${_NAMESPACE_DEFAULT}" + echo "${available_cmds[*]}" + exit 0 } -# printf version of msg(), 20 char padding between prefix and suffix +# The main trap handler for any command script, do not call this function manually! Instead add your own trap handlers +# like so: # -# Arguments: -# 1: msg_prefix -# n: msg_suffix -function msgf() { - local msg_prefix - msg_prefix="$1" - shift - printf '%s %-20s %s\n' '-->' "${msg_prefix}" "$@" +# add_trap_fn myhandler_fn +# do stuff.. +# rm_trap_fn myhandler_fn +# +# If your handler requires arguments use a global var named _myhandler_fn_args. The rm_trap_fn function will unset this +# var should it be set. +# +# Note that your trap handler should not exit the script, it might prevent other handlers from executing. 
+function kubler_abort_handler() { + local trap_fn + [[ "${_kubler_internal_abort}" != 'true' ]] && { echo -e ""; msg_error "caught interrupt, aborting.."; } + for trap_fn in "${_kubler_trap_functions[@]}"; do + [[ -z "${trap_fn}" ]] && continue + "${trap_fn}" + done + die } -# Read user input displaying given question -# # Arguments: -# 1: question -# 2: default_value -# Return value: user input or passed default_value -function ask() { - __ask= - local question default_value - question="$1" - default_value="$2" - read -r -p "${question} (${default_value}): " __ask - [[ -z "${__ask}" ]] && __ask="${default_value}" +# 1: fn_name - function name that should be called on abort +function add_trap_fn() { + local fn_name + fn_name="$1"; + _kubler_trap_functions+=( "${fn_name}" ) } # Arguments: -# 1: file_path as string -# 2: error_msg, optional -function file_exists_or_die() { - local file error_msg - file="$1" - [[ -z "$2" ]] && error_msg="Couldn't read: ${file}" - [[ -f "${file}" ]] || die "${error_msg}" +# 1: fn_name - function name that should be removed from global trap handler +function rm_trap_fn() { + local fn_name trap_fn tmp_array + fn_name="$1"; + tmp_array=() + for trap_fn in "${_kubler_trap_functions[@]}"; do + [[ "${trap_fn}" != "${fn_name}" ]] && tmp_array+=("${trap_fn}") + done + unset _"${fn_name}"_args + _kubler_trap_functions=( "${tmp_array[@]}" ) } -function sha_sum() { - [[ $(command -v sha512sum) ]] && echo 'sha512sum' || echo 'shasum -a512' +# Sets __get_include_path to absolute path for given relative file_sub_path. The function will check both +# KUBLER_DATA_DIR and _LIB_DIR, in that order. First hit wins, returns exit signal 3 if the path doesn't exist. 
+# +# Arguments: +# 1: file_sub_path as string +function get_include_path() { + __get_include_path= + local file_sub_path base_path + file_sub_path="$1" + if [[ -f "${KUBLER_DATA_DIR}/${file_sub_path}" ]]; then + base_path="${KUBLER_DATA_DIR}" + elif [[ -f "${_LIB_DIR}/${file_sub_path}" ]]; then + base_path="${_LIB_DIR}" + else + return 3 + fi + __get_include_path="${base_path}/${file_sub_path}" } # Make sure required binaries are in PATH @@ -100,108 +146,113 @@ function has_required_binaries() { done } -# Returns 0 if given string contains given word or 3 if not. Does *not* match substrings. +# Source build engine script depending on passed engine_id or BUILD_ENGINE value # # Arguments: -# 1: string -# 2: word -function string_has_word() { - local regex - regex="(^| )${2}($| )" - if [[ "${1}" =~ $regex ]];then - return 0 - else - return 3 +# 1: engine_id - optional, default: value of BUILD_ENGINE +# shellcheck disable=SC2120 +function source_build_engine() { + local engine_id + engine_id="${1:-${BUILD_ENGINE}}" + if [[ "${_last_sourced_engine}" != "${engine_id}" ]]; then + get_include_path "engine/${engine_id}.sh" || die "Couldn't find build engine: ${engine_id}" + # shellcheck source=lib/engine/docker.sh + source "${__get_include_path}" + _last_sourced_engine="${engine_id}" fi } -# Run sed over given $file with given $sed_args array +# Return namespace dir of given absolute image_path. 
# # Arguments: -# 1: full file path as string -# 2: sed_args as array -function replace_in_file() { - local file_path sed_arg - file_path="${1}" - declare -a sed_arg=("${!2}") - sed "${sed_arg[@]}" "${file_path}" > "${file_path}.tmp" || die - mv "${file_path}.tmp" "${file_path}" || die -} - -# Source build engine script depending on BUILD_ENGINE value -function source_build_engine() { - local engine - engine="${_LIB_DIR}/engine/${BUILD_ENGINE}.sh" - if [[ "${_last_sourced_engine}" != "${BUILD_ENGINE}" ]]; then - file_exists_or_die "${engine}" - # shellcheck source=lib/engine/docker.sh - source "${engine}" - _last_sourced_engine="${BUILD_ENGINE}" +# 1: image_path +function get_ns_dir_by_image_path() { + __get_ns_dir_by_image_path= + local image_path + image_path="$1" + if [[ "${image_path}" == /*/"${_IMAGE_PATH}"* ]]; then + image_path="${image_path%%/${_IMAGE_PATH}*}" + elif [[ "${image_path}" == /*/"${_BUILDER_PATH}"* ]]; then + image_path="${image_path%%/${_BUILDER_PATH}*}" + else + return 3 fi + __get_ns_dir_by_image_path="${image_path}" } -# Read namespace build.conf for given image_id +# Read namespace kubler.conf for given absolute ns_dir # # Arguments: -# 1: image_id (i.e. kubler/busybox) +# 1: ns_dir function source_namespace_conf() { - local image_id current_ns conf_file - image_id="$1" + local ns_dir conf_file final_tag + ns_dir="$1" - [[ "${_NAMESPACE_TYPE}" == 'single' ]] && return 0 + # reset to system config at /etc/kubler.conf or _KUBLER_DIR/kubler.conf first.. + # shellcheck source=kubler.conf disable=SC1090 + source "${_kubler_system_conf}" - # reset to possible user conf first.. - # shellcheck disable=SC1090 - [[ -f "${_ns_conf}" ]] && source "${_ns_conf}" + # ..then read user config at KUBLER_DATA_DIR/kubler.conf.. + # shellcheck source=kubler.conf + [[ -f "${_kubler_user_conf}" ]] && source "${_kubler_user_conf}" - # ..then read project conf to initialize any missing defaults if necessary + # ..then current multi namespace conf.. 
# shellcheck source=kubler.conf - [[ "${_ns_conf}" != "${_global_conf}" ]] && source "${_global_conf}" + [[ "${_kubler_ns_conf}" != "${_kubler_user_conf}" && -f "${_kubler_ns_conf}" ]] && source "${_kubler_ns_conf}" + [[ -n "${IMAGE_TAG}" ]] && final_tag="${IMAGE_TAG}" - [[ "${image_id}" != *"/"* ]] && return 0 + conf_file="${ns_dir}"/"${_KUBLER_CONF}" - current_ns="${image_id%%/*}" - get_abs_ns_path "${current_ns}" - conf_file="${__get_abs_ns_path}"/"${_KUBLER_CONF}" + # ..finally read current namespace conf + # shellcheck source=kubler.conf + file_exists_or_die "${conf_file}" "Couldn't read namespace conf ${conf_file}" && source "${conf_file}" + + [[ -z "${IMAGE_TAG}" ]] && die 'No IMAGE_TAG defined in any kubler.conf file.' + # silently ignore IMAGE_TAG if it was already defined in a parent kubler.conf + [[ -n "${IMAGE_TAG}" && -n "${final_tag}" && "${IMAGE_TAG}" != "${final_tag}" ]] \ + && IMAGE_TAG="${final_tag}" - # ..then read current namespace conf - # shellcheck source=dock/kubler/kubler.conf - file_exists_or_die "${conf_file}" && source "${conf_file}" - _current_namespace="${current_ns}" - # just for BC and to make build.conf/templates a bit more consistent to use. not used otherwise - NAMESPACE="${current_ns}" + _current_namespace="$(basename -- "${ns_dir}")" + # just for BC and to make build.conf/templates a bit more consistent to use. not used otherwise internally + NAMESPACE="${_current_namespace}" - source_build_engine + source_build_engine "${BUILD_ENGINE}" } # Read image build.conf for given image_path # # Arguments: -# 1: image_path (i.e. 
kubler/images/busybox) +# 1: image_path - can be either relative to a namespace dir or an absolute path function source_image_conf() { local image_path build_conf image_path="$1" + # exit if we just sourced the given build.conf [[ "${_last_sourced_image}" == "${image_path}" ]] && return 0 - if [[ "${_NAMESPACE_TYPE}" != 'single' ]]; then - unset BOB_CHOST BOB_CFLAGS BOB_CXXFLAGS BOB_BUILDER_CHOST BOB_BUILDER_CFLAGS BOB_BUILDER_CXXFLAGS ARCH ARCH_URL IMAGE_TAG - source_namespace_conf "${image_path}" + unset BOB_CHOST BOB_CFLAGS BOB_CXXFLAGS BOB_BUILDER_CHOST BOB_BUILDER_CFLAGS BOB_BUILDER_CXXFLAGS ARCH ARCH_URL IMAGE_TAG + unset POST_BUILD_HC POST_BUILD_HC_MAX_DURATION POST_BUILD_HC_INTERVAL POST_BUILD_HC_START_PERIOD POST_BUILD_HC_RETRY + + if [[ "${image_path}" != '/'* ]]; then + get_ns_include_path "${image_path}" + image_path="${__get_ns_include_path}" fi - unset STAGE3_BASE STAGE3_DATE IMAGE_PARENT BUILDER BUILDER_CAPS_SYS_PTRACE BUILDER_DOCKER_ARGS - [[ -z "${_use_parent_builder_mounts}" ]] && unset BUILDER_MOUNTS - get_abs_ns_path "${image_path}" - build_conf="${__get_abs_ns_path}/"build.conf + get_ns_dir_by_image_path "${image_path}" + source_namespace_conf "${__get_ns_dir_by_image_path}" - # shellcheck source=dock/kubler/images/busybox/build.conf - file_exists_or_die "${build_conf}" && source "${build_conf}" + unset STAGE3_BASE STAGE3_DATE IMAGE_PARENT BUILDER BUILDER_CAPS_SYS_PTRACE BUILDER_DOCKER_ARGS + [[ "${_use_parent_builder_mounts}" != 'true' ]] && unset BUILDER_MOUNTS + + build_conf="${image_path}/"build.conf + file_exists_or_die "${build_conf}" "Couldn't read image config ${build_conf}" + # shellcheck source=lib/template/docker/image/build.conf + source "${build_conf}" # assume scratch if IMAGE_PARENT is not set [[ -z "${IMAGE_PARENT}" ]] && IMAGE_PARENT='scratch' # stage3 overrides BUILDER, unset if defined [[ -n "${STAGE3_BASE}" ]] && unset BUILDER - _last_sourced_image="${image_path}" } @@ -211,7 +262,7 @@ function source_image_conf() { # 1: 
image_id (i.e. kubler/busybox) function source_push_conf() { local namespace - namespace=${1%%/*} + namespace="${1%%/*}" # exit if we just sourced for this NS [[ "${_last_sourced_push_conf}" == "${namespace}" ]] && return 0 # shellcheck disable=SC1090 @@ -219,7 +270,7 @@ function source_push_conf() { _last_sourced_push_conf="${namespace}" } -# Check image dependencies and return base build container for given image_id. Recursive. +# Check image dependencies and return base build container id for given image_id. Recursive. # # Arguments: # @@ -229,8 +280,8 @@ function get_image_builder_id() { local image_id image_id="$1" [[ "${image_id}" == 'scratch' ]] && __get_image_builder_id="${DEFAULT_BUILDER}" && return 0 - expand_image_id "${image_id}" "${_IMAGE_PATH}" - if [[ -n "${image_id}" && "${image_id}" != "scratch" ]]; then + if [[ -n "${image_id}" && "${image_id}" != 'scratch' ]]; then + expand_image_id "${image_id}" "${_IMAGE_PATH}" # shellcheck disable=SC2154 source_image_conf "${__expand_image_id}" if [[ -n "${BUILDER}" ]];then @@ -269,36 +320,44 @@ function fetch_stage3_archive_name() { } # Download and verify stage3 tar ball +# +# Arguments: +# 1: stage3_file function download_stage3() { - [[ -d "${DOWNLOAD_PATH}" ]] || mkdir -p "${DOWNLOAD_PATH}" - local is_autobuild stage3_contents stage3_digests sha512_hashes sha512_check sha512_failed wget_exit + [[ -d "${KUBLER_DOWNLOAD_DIR}" ]] || mkdir -p "${KUBLER_DOWNLOAD_DIR}" + local is_autobuild stage3_file stage3_contents stage3_digests sha512_hashes sha512_check sha512_failed \ + wget_exit wget_args is_autobuild=false - fetch_stage3_archive_name || die "Couldn't find a stage3 file for ${ARCH_URL}" - _stage3_file="${__fetch_stage3_archive_name}" - stage3_contents="${_stage3_file}.CONTENTS" - stage3_digests="${_stage3_file}.DIGESTS" + stage3_file="$1" + stage3_contents="${stage3_file}.CONTENTS" + stage3_digests="${stage3_file}.DIGESTS" if [[ "${ARCH_URL}" == *autobuilds* ]]; then - 
stage3_digests="${_stage3_file}.DIGESTS.asc" + stage3_digests="${stage3_file}.DIGESTS.asc" is_autobuild=true fi - for file in "${_stage3_file}" "${stage3_contents}" "${stage3_digests}"; do - [ -f "${DOWNLOAD_PATH}/${file}" ] && continue - trap 'handle_download_error ${DOWNLOAD_PATH}/${file}' EXIT - wget -O "${DOWNLOAD_PATH}/${file}" "${ARCH_URL}${file}" + wget_args=() + [[ "${_arg_verbose}" == 'off' ]] && wget_args+=( '-q' '-nv' ) + + for file in "${stage3_file}" "${stage3_contents}" "${stage3_digests}"; do + [ -f "${KUBLER_DOWNLOAD_DIR}/${file}" ] && continue + + _handle_download_error_args="${KUBLER_DOWNLOAD_DIR}/${file}" + add_trap_fn 'handle_download_error' + wget "${wget_args[@]}" -O "${KUBLER_DOWNLOAD_DIR}/${file}" "${ARCH_URL}${file}" wget_exit=$? - [[ "${wget_exit}" -eq 8 ]] && msg "*** Got a 404 for ${file}, try running the update command to resolve this." + [[ "${wget_exit}" -eq 8 ]] && msg_error "HTTP 404 for ${file}, try running the update command to resolve this." [[ "${wget_exit}" -ne 0 ]] && exit $? 
- trap - EXIT + rm_trap_fn 'handle_download_error' done # shellcheck disable=SC2154 if [ "${_arg_skip_gpg_check}" = false ] && [ "${is_autobuild}" = true ]; then - gpg --verify "${DOWNLOAD_PATH}/${stage3_digests}" || die "insecure digests" + gpg --verify "${KUBLER_DOWNLOAD_DIR}/${stage3_digests}" || die "Insecure digests" elif [ "${is_autobuild}" = false ]; then msg "GPG verification not supported for experimental stage3 tar balls, only checking SHA512" fi - sha512_hashes="$(grep -A1 SHA512 "${DOWNLOAD_PATH}/${stage3_digests}" | grep -v '^--')" - sha512_check="$(cd "${DOWNLOAD_PATH}/" && (echo "${sha512_hashes}" | $(sha_sum) -c))" + sha512_hashes="$(grep -A1 SHA512 "${KUBLER_DOWNLOAD_DIR}/${stage3_digests}" | grep -v '^--')" + sha512_check="$(cd "${KUBLER_DOWNLOAD_DIR}/" && (echo "${sha512_hashes}" | $(sha_sum) -c))" sha512_failed="$(echo "${sha512_check}" | grep FAILED)" if [ -n "${sha512_failed}" ]; then die "${sha512_failed}" @@ -306,96 +365,102 @@ function download_stage3() { } # Download and verify portage snapshot, when using latest it will download at most once per day +# +# Arguments: +# 1: portage_file function download_portage_snapshot() { PORTAGE_DATE="${PORTAGE_DATE:-latest}" PORTAGE_URL="${PORTAGE_URL:-${MIRROR}snapshots/}" - [[ -d "${DOWNLOAD_PATH}" ]] || mkdir -p "${DOWNLOAD_PATH}" - local portage_sig portage_md5 file dl_name - _portage_file="portage-${PORTAGE_DATE}.tar.xz" - portage_sig="${_portage_file}.gpgsig" - portage_md5="${_portage_file}.md5sum" + [[ -d "${KUBLER_DOWNLOAD_DIR}" ]] || mkdir -p "${KUBLER_DOWNLOAD_DIR}" + local portage_file portage_sig portage_md5 file dl_name wget_args + portage_file="$1" + portage_sig="${portage_file}.gpgsig" + portage_md5="${portage_file}.md5sum" - for file in "${_portage_file}" "${portage_sig}" "${portage_md5}"; do + for file in "${portage_file}" "${portage_sig}" "${portage_md5}"; do dl_name="${file}" if [[ "${PORTAGE_DATE}" == 'latest' ]]; then - dl_name="${_portage_file//latest/${_TODAY}}" + 
dl_name="${portage_file//latest/${_TODAY}}" fi - if [ ! -f "${DOWNLOAD_PATH}/${dl_name}" ]; then - trap 'handle_download_error ${DOWNLOAD_PATH}/${dl_name}' EXIT - wget -O "${DOWNLOAD_PATH}/${dl_name}" "${MIRROR}snapshots/${file}" || exit $? - trap - EXIT + if [[ ! -f "${KUBLER_DOWNLOAD_DIR}/${dl_name}" ]]; then + wget_args=() + [[ "${_arg_verbose}" == 'off' ]] && wget_args+=( '-q' '-nv' ) + _handle_download_error_args="${KUBLER_DOWNLOAD_DIR}/${dl_name}" + add_trap_fn 'handle_download_error' + wget "${wget_args[@]}" -O "${KUBLER_DOWNLOAD_DIR}/${dl_name}" "${MIRROR}snapshots/${file}" || exit $? + rm_trap_fn 'handle_download_error' fi done # use current date instead of latest from here on if [[ "${PORTAGE_DATE}" == 'latest' ]]; then - portage_sig="${_portage_file//latest/${_TODAY}}.gpgsig" - portage_md5="${_portage_file//latest/${_TODAY}}.md5sum" - _portage_file="${_portage_file//latest/${_TODAY}}" + portage_sig="${portage_file//latest/${_TODAY}}.gpgsig" + portage_md5="${portage_file//latest/${_TODAY}}.md5sum" + portage_file="${portage_file//latest/${_TODAY}}" PORTAGE_DATE="${_TODAY}" fi - if [[ "${_arg_skip_gpg_check}" != 'on' ]] && [[ -f "${DOWNLOAD_PATH}/${portage_sig}" ]]; then - gpg --verify "${DOWNLOAD_PATH}/${portage_sig}" "${DOWNLOAD_PATH}/${_portage_file}" || die "Insecure digests." + if [[ "${_arg_skip_gpg_check}" != 'on' ]] && [[ -f "${KUBLER_DOWNLOAD_DIR}/${portage_sig}" ]]; then + gpg --verify "${KUBLER_DOWNLOAD_DIR}/${portage_sig}" "${KUBLER_DOWNLOAD_DIR}/${portage_file}" || die "Insecure digests." fi } -# Arguments: -# 1: file - full path of downloaded file -# 2: error_message - optional function handle_download_error() { local file msg - file="$1" + file="${_handle_download_error_args}" msg="${2:-Aborted download of ${file}}" [[ -f "${file}" ]] && rm "${file}" die "${msg}" } -# Return the correct absolute path for given relative_image_path: -# -# 1. returns input if path is actually absolute -# 2. 
the path starts with kubler -> return abs path for internal kubler namespace -# 3. else -> return abs path for current namespace dir +# Returns the absolute path for given relative_image_path. # # Arguments: # 1: relative_image_path -function get_abs_ns_path() { - __get_abs_ns_path= +function get_ns_include_path() { + __get_ns_include_path= local relative_image_path abs_path relative_image_path="$1" # return input if it's actually an absolute path - [[ "${relative_image_path}" == "/"* ]] && __get_abs_ns_path="${relative_image_path}" && return 0 + [[ "${relative_image_path}" == "/"* ]] && __get_ns_include_path="${relative_image_path}" && return 0 - if [[ "${relative_image_path}" == "kubler" || "${relative_image_path}" == "kubler/"* ]]; then - abs_path="${_KUBLER_NAMESPACE_DIR}"/"${relative_image_path}" + if [[ "${_NAMESPACE_TYPE}" == 'single' ]] && \ + [[ "${relative_image_path}" == "${_NAMESPACE_DEFAULT}"/* || "${relative_image_path}" == "${_NAMESPACE_DEFAULT}" ]] + then + relative_image_path="${relative_image_path//${_NAMESPACE_DEFAULT}//}" + abs_path="${_NAMESPACE_DIR}/${relative_image_path}" else - abs_path="${_NAMESPACE_DIR}"/"${relative_image_path}" + if [[ -d "${_NAMESPACE_DIR}/${relative_image_path}" ]]; then + abs_path="${_NAMESPACE_DIR}/${relative_image_path}" + elif [[ -d "${_KUBLER_NAMESPACE_DIR}/${relative_image_path}" || "${relative_image_path}" == *"-core" ]]; then + abs_path="${_KUBLER_NAMESPACE_DIR}/${relative_image_path}" + else + return 3 + fi fi - __get_abs_ns_path="${abs_path}" + __get_ns_include_path="${abs_path}" } -# Sets __expand_image_id to image sub-path for given image_id +# Sets __expand_image_id to absolute image path for given image_id and image_type # # 1: image_id (i.e. 
kubler/busybox) -# 2: image_type ($_IMAGE_PATH or $_BUILDER_PATH) +# 2: image_type ($_IMAGE_PATH or $_BUILDER_PATH), optional, default: $_IMAGE_PATH function expand_image_id() { __expand_image_id= - local image_id image_type msg_type + local image_id image_type image_ns expand_id image_id="$1" - image_type="$2" - msg_type='image' - [[ "${image_type}" == "${_BUILDER_PATH}" ]] && msg_type="builder" - if [[ "${_NAMESPACE_TYPE}" == 'single' ]]; then + image_type="${2:-${_IMAGE_PATH}}" + image_ns="${image_id%%/*}" + if [[ "${_NAMESPACE_TYPE}" == 'single' && "${image_ns}" == "${_NAMESPACE_DEFAULT}" ]]; then if [[ "${image_id}" == *"/"* ]]; then - image_ns="${image_id%%/*}" - [[ "${image_ns}" != "${_current_namespace}" ]] \ - && die "Unknown namespace \"${image_ns}\" for ${msg_type} ${image_id}, expected \"${_current_namespace}\"" image_id="${image_id##*/}" fi - __expand_image_id="${image_type}${image_id}" + expand_id="${image_type}${image_id}" else - __expand_image_id="${image_id/\//\/${image_type}}" + expand_id="${image_id/\//\/${image_type}}" fi + get_ns_include_path "${expand_id}" || return $? + __expand_image_id="${__get_ns_include_path}" } # Expand requested namespace and image mix of passed target_ids to fully qualified image ids @@ -405,56 +470,45 @@ function expand_image_id() { # n: target_id (i.e. 
namespace or namespace/image) function expand_requested_target_ids() { __expand_requested_target_ids= - local target_ids expanded target image current_ns + local target_ids expanded target image is_processed target_ids=( "$@" ) - expanded="" - if [[ "${_NAMESPACE_TYPE}" == 'single' ]]; then - current_ns="$(basename -- "${_NAMESPACE_DIR}")" - for target in "${target_ids[@]}"; do - # strip namespace if this is a fully qualified image id, redundant in single namespace mode - if [[ "${target}" == *"/"* ]]; then - [[ "${target%%/*}" != "${current_ns}" ]] && die "Invalid namespace for ${target}, expected: ${current_ns}" - target="${target##*/}" + expanded=() + for target in "${target_ids[@]}"; do + is_processed= + # strip trailing slash possibly added by bash completion + [[ "${target}" == */ ]] && target="${target: : -1}" + # is target a fully qualified image id? + if [[ "${target}" == *"/"* ]]; then + expand_image_id "${target}" "${_IMAGE_PATH}" || die "Couldn't find an image dir for ${target}" + expanded+=( "${target}" ) + else + # is target an image id with omitted namespace? + if [[ -n "${_NAMESPACE_DEFAULT}" ]]; then + expand_image_id "${_NAMESPACE_DEFAULT}/${target}" "${_IMAGE_PATH}" \ + && expanded+=( "${_NAMESPACE_DEFAULT}/${target}" ) && is_processed=1 fi - expand_image_id "${target}" "${_IMAGE_PATH}" - [[ ! -d "${__expand_image_id}" ]] && die "Couldn't find image ${target} in ${_NAMESPACE_DIR}" - expanded+=" ${current_ns}/${target}" - done - else - local is_processed - for target in "${target_ids[@]}"; do - is_processed= - # strip trailing slash possibly added by namespace bash completion
- if [[ -n "${_NAMESPACE_DEFAULT}" ]]; then - expand_image_id "${_NAMESPACE_DEFAULT}/${target}" "${_IMAGE_PATH}" - if [[ -d "${__expand_image_id}" ]]; then - expanded+=" ${_NAMESPACE_DEFAULT}/${target}" - is_processed=1 - fi - fi - # ..if not it should be a namespace, expand to image ids - if [[ -z "${is_processed}" ]]; then - [[ ! -d "${_NAMESPACE_DIR}/${target}/${_IMAGE_PATH}" ]] \ - && die "Couldn't find namespace ${target} in ${_NAMESPACE_DIR}" - pushd "${_NAMESPACE_DIR}" > /dev/null || die "pushd error on directory ${_NAMESPACE_DIR}" - for image in "${target}/${_IMAGE_PATH}"*; do - expanded+=" ${image/${_IMAGE_PATH}/}" - done + # ..if not it should be a namespace, expand to image ids + if [[ -z "${is_processed}" ]]; then + get_ns_include_path "${target}" \ + || die "Couldn't find namespace dir ${target} in ${_NAMESPACE_DIR}" + pushd "${__get_ns_include_path}" > /dev/null || die "pushd error on directory ${_NAMESPACE_DIR}" + if ! dir_has_subdirs "${__get_ns_include_path}/${_IMAGE_PATH}"; then + msg_error "Namespace ${target} has no images yet. To create an image run:" + msg_info_sub + msg_info_sub "$ kubler new image ${target}/" + msg_info_sub popd > /dev/null || die "popd failed in function expand_requested_target_ids" + die fi + for image in "${_IMAGE_PATH}"*; do + expanded+=( "${target}/${image/${_IMAGE_PATH}/}" ) + done + popd > /dev/null || die "popd failed in function expand_requested_target_ids" fi - done - fi + fi + done # shellcheck disable=SC2034 - __expand_requested_target_ids=${expanded} + __expand_requested_target_ids=( "${expanded[@]}" ) } # Sets __find_in_parents to path where given search_path exists, or empty string if it doesn't. 
@@ -475,33 +529,44 @@ function find_in_parents() { # Set the _NAMESPACE_DIR and _NAMESPACE_TYPE variables for given working_dir # Types: -# local - path is inside kubler project root -# multi - directory with multiple namespaces outside of project root -# single - only a single namespace dir outside of project root +# local - working_dir is inside KUBLER_DATA_DIR +# multi - working_dir has multiple namespaces +# single - working_dir has only a single namespace # none - only allowed when creating a new namespace # # 1: working_dir function detect_namespace() { local working_dir real_ns_dir parent_dir parent_conf working_dir="$1" - _global_conf="${_KUBLER_DIR}/${_KUBLER_CONF}" - _ns_conf="${_global_conf}" + # deny executing kubler inside its main script folder + if [[ "${working_dir}" == "${_KUBLER_DIR}" || "${working_dir}" == "${_KUBLER_DIR}"/* ]]; then + if [[ "${_is_terminal}" == 'true' ]]; then + die "Kubler execution forbidden in --working-dir ${working_dir}" + else + # silent exit if not in a terminal to handle possible bash-completion invocation, etc + exit 1 + fi + fi + _kubler_system_conf=/etc/"${_KUBLER_CONF}" + [[ ! -f "${_kubler_system_conf}" ]] && _kubler_system_conf="${_KUBLER_DIR}/${_KUBLER_CONF}" + _kubler_user_conf="${KUBLER_DATA_DIR}/${_KUBLER_CONF}" + _kubler_ns_conf="${_kubler_user_conf}" get_absolute_path "${working_dir}" # shellcheck disable=SC2154 - [[ -d "${__get_absolute_path}" ]] || die "Couldn't find namespace location: ${working_dir}" + [[ -d "${__get_absolute_path}" ]] || die "fatal: Couldn't find namespace location: ${working_dir}" # find next namespace dir, respect symlink paths, as in don't resolve find_in_parents "${working_dir}" "${_KUBLER_CONF}" real_ns_dir="${__find_in_parents}" - # working dir inside kubler project root? - if [[ "${working_dir}" == "${_KUBLER_DIR}" || "${working_dir}" == "${_KUBLER_DIR}/"* ]]; then + # working dir inside kubler data dir? 
+ if [[ "${working_dir}" == "${KUBLER_DATA_DIR}" || "${working_dir}" == "${KUBLER_DATA_DIR}"/* ]]; then # ..and inside a namespace dir? if [[ -d "${real_ns_dir}/${_IMAGE_PATH}" ]]; then readonly _NAMESPACE_DEFAULT="$(basename -- "${real_ns_dir}")" fi - real_ns_dir="${_KUBLER_DIR}"/dock + real_ns_dir="${_KUBLER_NAMESPACE_DIR}" readonly _NAMESPACE_TYPE='local' else # allow missing namespace dir for new command, the user might want to create a new namespace @@ -511,11 +576,10 @@ function detect_namespace() { real_ns_dir="${working_dir}" readonly _NAMESPACE_TYPE='none' else - die "Couldn't find ${_KUBLER_CONF} in current or parent directories starting from ${working_dir} - Either cd to Kubler's project root or cd-into/create an external namespace dir." + die "Current --working-dir is not a Kubler namespace: ${working_dir}" fi fi - _ns_conf="${real_ns_dir}/${_KUBLER_CONF}" + _kubler_ns_conf="${real_ns_dir}/${_KUBLER_CONF}" parent_dir="$(dirname -- "${real_ns_dir}")" parent_conf="${parent_dir}/${_KUBLER_CONF}" @@ -525,11 +589,11 @@ function detect_namespace() { if [[ ! -f "${parent_conf}" ]]; then readonly _NAMESPACE_TYPE='single' _current_namespace="${_NAMESPACE_DEFAULT}" - # just for BC and to make build.conf/templates a bit more consistent to use. not used otherwise - NAMESPACE="${_current_namespace}" + # just for BC and to make build.conf/templates a bit more consistent to use. unused otherwise internally + export NAMESPACE="${_current_namespace}" else real_ns_dir="${parent_dir}" - _ns_conf="${parent_conf}" + _kubler_ns_conf="${parent_conf}" fi fi @@ -537,15 +601,17 @@ function detect_namespace() { # else assume multi mode [[ -z "${_NAMESPACE_TYPE}" ]] && readonly _NAMESPACE_TYPE='multi' - # read namespace config first.. - # shellcheck disable=SC1090 - [[ -f "${_ns_conf}" ]] && source "${_ns_conf}" + # Read system config at /etc/kubler.conf or _KUBLER_DIR/kubler.conf first.. 
+ # shellcheck source=kubler.conf + file_exists_or_die "${_kubler_system_conf}" && source "${_kubler_system_conf}" - # ..then project conf to initialize any missing defaults + # ..then possible user config at KUBLER_DATA_DIR/kubler.conf # shellcheck source=kubler.conf - [[ "${_ns_conf}" != "${_global_conf}" ]] && source "${_global_conf}" + [[ -f "${_kubler_user_conf}" ]] && source "${_kubler_user_conf}" - [[ "${_NAMESPACE_TYPE}" == 'single' ]] && source_build_engine + # ..then current namespace config + # shellcheck source=kubler.conf + [[ "${_kubler_ns_conf}" != "${_kubler_user_conf}" && -f "${_kubler_ns_conf}" ]] && source "${_kubler_ns_conf}" # just for well formatted output get_absolute_path "${real_ns_dir}" @@ -561,9 +627,8 @@ function add_documentation_header() { local image image_type image_path doc_file header image="$1" image_type="$2" - expand_image_id "${image}" "${image_type}" - get_abs_ns_path "${__expand_image_id}" - image_path="${__get_abs_ns_path}" + expand_image_id "${image}" "${image_type}" || die "Couldn't find image ${image}" + image_path="${__expand_image_id}" doc_file="${image_path}/PACKAGES.md" header="### ${image}:${IMAGE_TAG}" get_image_size "${image}" "${IMAGE_TAG}" diff --git a/lib/engine/acbuild.sh b/lib/engine/acbuild.sh deleted file mode 100644 index 63ecef33..00000000 --- a/lib/engine/acbuild.sh +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg -# All rights reserved. - -### -### REQUIRED FUNCTIONS -### - -# Is this engine functional? Called once per engine in current image dependency graph. -function validate_engine() { - #_required_binaries+=" some-command some-other-command" - #has_required_binaries - msg "validate dummy engine" -} - -# Has given image_id all requirements to start the build? Called once per image in current image dependency graph. -# -# Arguments: -# 1: image_id (i.e. 
kubler/busybox) -# 2: image_type ($_IMAGE_PATH or $_BUILDER_PATH) -function validate_image() { - local image_id image_type - image_id="$1" - image_type="$2" - expand_image_id "${image_id}" "${image_type}" - #file_exists_or_die "${__expand_image_id}/foo.conf" - msg "validate dummy repo: ${__expand_image_id}" -} - -# Build the image for given image_id -# -# Arguments: -# 1: image_id (i.e. kubler/busybox) -function build_image() { - local image_id image_type - image_id="${1}" - image_type="${2:-${_IMAGE_PATH}}" - msg "building dummy image: ${image_id}" - # finish PACKAGES.md when using build-root.sh once the build is done: - #add_documentation_header "${image_id}" "${_IMAGE_PATH}" || die "failed to generate PACKAGES.md for ${image_id}" -} - -# Exits with signal 0 if given image_id has a built and ready to run image or signal 3 if not. -# -# Arguments: -# 1: image_id (i.e. kubler/busybox) -# 2: image_tag - optional, default: ${IMAGE_TAG} -function image_exists() { - local image_id image_type image_tag - image_id="$1" - image_tag="${2:-${IMAGE_TAG}}" - return 3 -} - -# Sets __get_image_size for given image_id, required for generating PACKAGES.md header -# -# Arguments: -# 1: image_id (i.e. kubler/busybox) -# 2: image_tag (a.k.a. version) -function get_image_size() { - # assume failure - __get_image_size= - local image_id image_tag - image_id="$1" - image_tag="$2" - # determine image size - __get_image_size="xxMB" -} - -# Start a container from given image_id. -# -# Arguments: -# 1: image_id (i.e. 
kubler/busybox) -# 2: container_host_name -# 3: remove container after it exists, optional, default: true -# 4: container_name, optional, keep in mind that this needs to be unique for all existing containers on the host -function run_image() { - local image_id container_host_name auto_rm container_name - image_id="$1" - container_host_name="$2" - auto_rm="${3:-true}" - container_name="$4" - msg "running dummy image: ${image_id} with" - msg " mounts: ${_container_mounts[@]}" - msg " env: ${container_env[@]}" - msg " cmd: ${container_cmd[@]}" -} - - -### -### OPTIONAL (to implement that is, deleting any stubs is asking for trouble) -### - - -# This function is called once per stage3 build container and should -# bootstrap a stage3 with portage plus helper files from /bob-core. -# -# Arguments: -# 1: builder_id (i.e. kubler/bob) -function build_core() { - local builder_id - builder_id="$1" - #download_portage_snapshot - #download_stage3 - msg "building dummy core" -} - -# Produces a build container image for given builder_id -# Implement this if you want support for multiple build containers. -# -# Arguments: -# 1: builder_id (i.e. kubler/bob) -function build_builder() { - local builder_id - builder_id="$1" - msg "building dummy builder: ${DEFAULT_BUILDER}" -} - -# Called when using --no-deps, in most cases a thin wrapper to build_image() -# -# Arguments: -# 1: image_id (i.e. kubler/busybox) -function build_image_no_deps() { - local image_id - image_id="$1" - # build the image - build_image "${image_id}" -} - -# Sets __get_build_container to the builder_id required for building given image_id or signal 3 if not found/implemented. 
-# -# Arguments: -# 1: image_id -# 2: image_type ($_IMAGE_PATH or $_BUILDER_PATH) -function get_build_container() { - # assume failure - __get_build_container= - local image_id image_type - image_id="${1}" - image_type="${2:-${_IMAGE_PATH}}" - #__get_build_container="kubler/bob" - exit 3 -} - -# Handle image repository auth, called once per namespace if pushing -# -# Arguments: -# 1: namespace (i.e. kubler) -# 2: repository_url -function push_auth() { - local namespace repository_url login_args - namespace="$1" - repository_url="$2" - msg "logging into dummy repository" -} - -# Push image to a repository -# -# Arguments: -# 1: image_id (i.e. kubler/busybox) -# 2: repository_url -function push_image() { - local image_id repository_url - image_id="$1" - repository_url="$2" - msg "pushing ${image_id} to dummy repository at ${repository_url}" -} diff --git a/lib/engine/docker.sh b/lib/engine/docker.sh index d801a89d..9221d78a 100644 --- a/lib/engine/docker.sh +++ b/lib/engine/docker.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the @@ -10,7 +10,7 @@ # disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -# following disclaimer in the documentation and/or other materials provided with the distribution. +# following disclaimer in the documentation and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -25,7 +25,8 @@ DOCKER_IO=$(command -v docker.io) DOCKER="${DOCKER:-${DOCKER_IO:-docker}}" DOCKER_BUILD_OPTS="${DOCKER_BUILD_OPTS:-}" -_container_mount_portage="false" +_container_mount_portage='false' +_portage_image_processed='false' # Is this engine functional? Called once per engine in current image dependency graph. function validate_engine() { @@ -39,32 +40,29 @@ function validate_engine() { # # Arguments: # 1: image_id (i.e. kubler/busybox) -# 2: image_type ($_IMAGE_PATH or $_BUILDER_PATH) +# 2: image_path function validate_image() { - local image_id image_type + local image_id image_path image_id="$1" - image_type="$2" - expand_image_id "${image_id}" "${image_type}" - # shellcheck disable=SC2154 - get_abs_ns_path "${__expand_image_id}" + image_path="$2" # shellcheck disable=SC2154 - file_exists_or_die "${__get_abs_ns_path}/Dockerfile.template" + file_exists_or_die "${image_path}/Dockerfile.template" } -# Generate Dockerfile from Dockerfile.template +# Generate Dockerfile from Dockerfile.template for given absolute image_path. # # Arguments: -# 1: image_path (i.e. kubler/images/busybox) +# 1: image_path function generate_dockerfile() { local image_path sed_param bob_var image_path="$1" sed_param=() - # also make variables starting with BOB_ available in Dockerfile.template + [[ ! 
-f "${image_path}"/Dockerfile.template ]] && die "Couldn't read ${image_path}/Dockerfile.template" + # make variables starting with BOB_ available in Dockerfile.template for bob_var in ${!BOB_*}; do sed_param+=(-e "s|\${${bob_var}}|${!bob_var}|") done - get_abs_ns_path "${image_path}" - image_path="${__get_abs_ns_path}" + # shellcheck disable=SC2016,SC2153,SC2154 sed "${sed_param[@]}" \ -e 's|${IMAGE_PARENT}|'"${IMAGE_PARENT}"'|g' \ @@ -73,7 +71,7 @@ function generate_dockerfile() { -e 's/${TAG}/'"${IMAGE_TAG}"'/g' \ -e 's/${MAINTAINER}/'"${AUTHOR}"'/g' \ "${image_path}/Dockerfile.template" > "${image_path}/Dockerfile" \ - || die "error while generating ${image_path}/Dockerfile" + || die "Error while generating ${image_path}/Dockerfile" } # Returns given tag value from dockerfile or exit signal 3 if tag was not found. @@ -81,20 +79,18 @@ function generate_dockerfile() { # # Arguments: # 1: tag (i.e. FROM) -# 2: image_path (i.e. kubler/images/busybox) +# 2: image_path function get_dockerfile_tag() { __get_dockerfile_tag= local tag image_path dockerfile grep_out regex tag="$1" image_path="$2" - get_abs_ns_path "${image_path}" - image_path="${__get_abs_ns_path}" dockerfile="${image_path}/Dockerfile" file_exists_or_die "${dockerfile}" grep_out="$(grep ^"${tag}" "${dockerfile}")" regex="^${tag} ?(.*)?" 
if [[ "${grep_out}" =~ $regex ]]; then - if [[ "${BASH_REMATCH[1]}" ]]; then + if [[ -n "${BASH_REMATCH[1]}" ]]; then # shellcheck disable=SC2034 __get_dockerfile_tag="${BASH_REMATCH[1]}" else @@ -106,59 +102,50 @@ function get_dockerfile_tag() { # Remove image from local image store # # Arguments: -# # 1: image_id # 2: image_tag +# 3: remove_by_id - optional, rm the image via Docker image id which might also remove other tags that ref the same id function remove_image() { - local image_id image_tag + local image_id image_tag remove_by_id image_id="$1" image_tag="${2:-${IMAGE_TAG}}" - "${DOCKER}" rmi -f "${image_id}:${image_tag}" || die "failed to remove image ${image_id}:${image_tag}" + remove_by_id="${3:-false}" + + image_id="${image_id}:${image_tag}" + + if [[ "${remove_by_id}" == 'true' ]]; then + image_id="$("${DOCKER}" images "${image_id}" -q)" + fi + + "${DOCKER}" rmi -f "${image_id}" 1> /dev/null || die "Failed to remove image ${image_id}" } # Build the image for given image_id # # Arguments: # 1: image_id (i.e. kubler/busybox) -# 2: image_type - $_IMAGE_PATH or $_BUILDER_PATH, defaults to $_IMAGE_PATH +# 2: image_path +# 3: image_type - $_IMAGE_PATH or $_BUILDER_PATH, defaults to $_IMAGE_PATH +# 4: skip_rootfs - optional, default: false function build_image() { - local image_id image_type image_expanded image_path builder_id builder_commit_id current_image bob_var run_id - image_id="${1}" - image_type="${2:-${_IMAGE_PATH}}" - expand_image_id "${image_id}" "${image_type}" - image_expanded="${__expand_image_id}" - get_abs_ns_path "${image_expanded}" - image_path="${__get_abs_ns_path}" - - msg "--> build image ${image_id}" - image_exists_or_rm "${image_id}" "${image_type}" - local exists_return=$? 
- [[ ${exists_return} -eq 0 ]] && return 0 - # if the builder image does not exist we need to ensure there is no pre-existing rootfs.tar - if [[ ${exists_return} -eq 3 && "${image_type}" == "${_BUILDER_PATH}" ]]; then - [[ -f "${image_path}/rootfs.tar" ]] && rm "${image_path}/rootfs.tar" - fi - - generate_dockerfile "${image_expanded}" - - # build rootfs? - # shellcheck disable=SC2154 - if [[ ! -f "${image_path}/rootfs.tar" || "${_arg_force_full_image_build}" == 'on' ]] && \ - [[ "${image_type}" == "${_IMAGE_PATH}" || "${image_id}" != "${_current_namespace}"/*-core ]]; then + local image_id image_type image_path skip_rootfs builder_id builder_commit_id current_image bob_var run_id exit_sig + image_id="$1" + image_path="$2" + image_type="${3:-${_IMAGE_PATH}}" + skip_rootfs="$4" - msg "--> phase 1: building root fs" - - # save value of target image's PARENT_BUILDER_MOUNTS config as get_build_container() may override the ENV - unset _use_parent_builder_mounts - # shellcheck disable=SC2034 - [[ "${PARENT_BUILDER_MOUNTS}" == 'true' ]] && _use_parent_builder_mounts='true' + # add current image id to output logging + add_status_value "${image_id}" + local missing_builder + missing_builder= + if [[ "${skip_rootfs}" != 'true' ]]; then get_build_container "${image_id}" "${image_type}" builder_id="${__get_build_container}" # determine build container commit id builder_commit_id="" - current_image=${image_id##*/} + current_image="${image_id##*/}" if [[ -n "${BUILDER}" ]]; then builder_commit_id="${BUILDER##*/}-${current_image}" elif [[ "${image_type}" == "${_IMAGE_PATH}" ]]; then @@ -173,10 +160,41 @@ function build_image() { builder_id="${image_id}-core" builder_commit_id="${image_id##*/}" fi + # always rebuild if builder image doesn't exist + if ! 
image_exists "${_current_namespace}/${builder_commit_id}"; then + [[ -f "${image_path}/rootfs.tar" ]] && rm "${image_path}/rootfs.tar" + missing_builder='true' + fi + fi + + image_exists_or_rm "${image_id}" "${image_type}" + exit_sig=$? + [[ -z "${missing_builder}" && ${exit_sig} -eq 0 ]] \ + && msg_ok "skipped, already built." && return 0 + + # if the builder image does not exist we need to ensure there is no pre-existing rootfs.tar + if [[ ${exit_sig} -eq 3 && "${image_type}" == "${_BUILDER_PATH}" ]]; then + [[ -f "${image_path}/rootfs.tar" ]] && rm "${image_path}/rootfs.tar" + fi + + generate_dockerfile "${image_path}" + + # build rootfs? + # shellcheck disable=SC2154 + if [[ ! -f "${image_path}/rootfs.tar" || "${_arg_force_full_image_build}" == 'on' ]] && \ + [[ "${skip_rootfs}" != 'true' ]]; then + + # save value of target image's PARENT_BUILDER_MOUNTS config as get_build_container() may override the ENV + unset _use_parent_builder_mounts + # shellcheck disable=SC2034 + [[ "${PARENT_BUILDER_MOUNTS}" == 'true' ]] && _use_parent_builder_mounts='true' + + [[ ! -d "${KUBLER_DISTFILES_DIR}" ]] && mkdir -p "${KUBLER_DISTFILES_DIR}" + [[ ! 
-d "${KUBLER_PACKAGES_DIR}" ]] && mkdir -p "${KUBLER_PACKAGES_DIR}" _container_mounts=("${image_path}:/config" - "${_KUBLER_DIR}/tmp/distfiles:/distfiles" - "${_KUBLER_DIR}/tmp/packages:/packages" + "${KUBLER_DISTFILES_DIR}:/distfiles" + "${KUBLER_PACKAGES_DIR}:/packages" ) [[ ${#BUILDER_MOUNTS[@]} -gt 0 ]] && _container_mounts+=("${BUILDER_MOUNTS[@]}") @@ -191,33 +209,162 @@ function build_image() { _container_cmd=("kubler-build-root") _container_mount_portage="true" - msg "using ${builder_id}:${IMAGE_TAG}" - - run_id="${image_id//\//-}-${$}-${RANDOM}" - run_image "${builder_id}:${IMAGE_TAG}" "${image_id}" "false" "${run_id}" \ - || die "failed to build rootfs for ${image_expanded}" + run_id="rootfs-builder-${image_id//\//-}-${$}-${RANDOM}" + if [[ "${image_type}" == "${_IMAGE_PATH}" ]]; then + _status_msg="build root-fs using ${builder_id}:${IMAGE_TAG}" + else + _status_msg="bootstrap builder environment" + fi + pwrap run_image "${builder_id}:${IMAGE_TAG}" "${image_id}" "false" "${run_id}" \ + || die "${_status_msg}" - _container_mount_portage="false" + _container_mount_portage='false' - msg "commit ${run_id} as ${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" - "${DOCKER}" commit "${run_id}" "${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" || - die "failed to commit ${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" + _status_msg="commit ${run_id} as image ${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" + pwrap 'nolog' "${DOCKER}" commit "${run_id}" "${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" \ + || die "${_status_msg}" - "${DOCKER}" rm "${run_id}" || die "failed to remove container ${run_id}" + _status_msg="remove container ${run_id}" + pwrap 'nolog' "${DOCKER}" rm "${run_id}" || die "${_status_msg}" - msg "tag ${_current_namespace}/${builder_commit_id}:latest" - "${DOCKER}" tag "${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" "${_current_namespace}/${builder_commit_id}:latest" \ - || die "failed to tag 
${builder_commit_id}" + _status_msg="tag image ${_current_namespace}/${builder_commit_id}:latest" + pwrap 'nolog' "${DOCKER}" tag "${_current_namespace}/${builder_commit_id}:${IMAGE_TAG}" \ + "${_current_namespace}/${builder_commit_id}:latest" \ + || { msg_error "${_status_msg}"; die; } fi - msg "--> phase 2: build ${image_id}:${IMAGE_TAG}" + _status_msg="exec docker build -t ${image_id}:${IMAGE_TAG}" # shellcheck disable=SC2086 - "${DOCKER}" build ${DOCKER_BUILD_OPTS} -t "${image_id}:${IMAGE_TAG}" "${image_path}" || die "failed to build ${image_expanded}" + pwrap "${DOCKER}" build ${DOCKER_BUILD_OPTS} -t "${image_id}:${IMAGE_TAG}" "${image_path}" || die "${_status_msg}" - msg "tag ${image_id}:latest" - "${DOCKER}" tag "${image_id}:${IMAGE_TAG}" "${image_id}:latest" || die "failed to tag ${image_expanded}" + _status_msg="tag image ${image_id}:latest" + pwrap 'nolog' "${DOCKER}" tag "${image_id}:${IMAGE_TAG}" "${image_id}:latest" || die "${_status_msg}" - add_documentation_header "${image_id}" "${image_type}" || die "failed to generate PACKAGES.md for ${image_expanded}" + add_documentation_header "${image_id}" "${image_type}" || die "Failed to generate PACKAGES.md for ${image_id}" + local has_tests done_text + [[ -n "${POST_BUILD_HC}" || -f "${image_path}/build-test.sh" ]] && has_tests='true' + + # shellcheck disable=SC2154 + msg "${_term_cup}" + if [[ -n "${has_tests}" ]]; then + test_image "${image_id}:${IMAGE_TAG}" "${image_path}" + else + done_text='done.' + [[ -z "${has_tests}" && "${image_type}" != "${_BUILDER_PATH}" ]] \ + && done_text="${done_text} no tests. 
;(" + msg_ok "${done_text}" + fi +} + +# +# Arguments: +# 1: image_id +# 2: image_path +test_image() { + local image_id image_path exit_sig container_name + image_id="${1}" + image_path="${2}" + + # run build-test.sh in a test container + if [[ -f "${image_path}"/build-test.sh ]]; then + container_name="build-test-${image_id//[\:\/]/-}" + _container_mounts=( "${image_path}:/kubler-test/" ) + _container_cmd=( '/kubler-test/build-test.sh' ) + _status_msg="exec build-test.sh in container ${container_name}" + pwrap run_image "${image_id}" "${image_id}" 'true' "${container_name}" 'false' + exit_sig=$? + [[ ${exit_sig} -gt 0 ]] \ + && die "build-test.sh for image ${image_id} failed with exit signal: ${exit_sig}" + fi + + # run a detached container and monitor Docker's health-check status + if [[ -n "${POST_BUILD_HC}" ]]; then + local hc_current_duration hc_healthy_streak hc_failed_streak hc_itr hc_status hc_log status_tmpl hc_streak_bar + POST_BUILD_HC_MAX_DURATION="${POST_BUILD_HC_MAX_DURATION:-30}" + POST_BUILD_HC_INTERVAL="${POST_BUILD_HC_INTERVAL:-5}" + POST_BUILD_HC_TIMEOUT="${POST_BUILD_HC_TIMEOUT:-5}" + POST_BUILD_HC_START_PERIOD="${POST_BUILD_HC_START_PERIOD:-3}" + POST_BUILD_HC_RETRIES="${POST_BUILD_HC_RETRIES:-3}" + POST_BUILD_HC_MIN_HEALTHY_STREAK="${POST_BUILD_HC_MIN_HEALTHY_STREAK:-5}" + + container_name="health-check-${image_id//[\:\/]/-}" + _container_mounts=() + _container_cmd=() + _container_args=( '-d' + '--health-interval' "${POST_BUILD_HC_INTERVAL}s" + '--health-retries' "${POST_BUILD_HC_RETRIES}" + '--health-start-period' "${POST_BUILD_HC_START_PERIOD}s" + '--health-timeout' "${POST_BUILD_HC_TIMEOUT}s" ) + + # shellcheck disable=SC2064 + _handle_hc_container_run_args="${container_name}" + add_trap_fn 'handle_hc_container_run' + _status_msg="monitor health-check of container ${container_name}" + pwrap run_image "${image_id}" "${image_id}" 'true' "${container_name}" + _status_msg="health-check startup time is ${POST_BUILD_HC_START_PERIOD}s" + pwrap 
'nolog' sleep "${POST_BUILD_HC_START_PERIOD}" + hc_current_duration=0 + hc_healthy_streak=0 + hc_failed_streak=0 + hc_itr=0 + repeat_str '-' "${POST_BUILD_HC_MIN_HEALTHY_STREAK}" + # shellcheck disable=SC2154 + hc_streak_bar="${__repeat_str}" + hc_status='n/a' + msg -e "" + # shellcheck disable=SC2154 + [[ "${_is_terminal}" == 'false' ]] && msg_info "monitor docker health-check\n" + + while [[ ${POST_BUILD_HC_MAX_DURATION} -gt ${hc_current_duration} ]]; do + if [[ ${hc_itr} -ge ${POST_BUILD_HC_INTERVAL} ]]; then + hc_status="$("${DOCKER}" inspect "${container_name}" | jq '.[] | .State.Health.Status')" + hc_log="$("${DOCKER}" inspect "${container_name}" | jq '.[] | .State.Health.Log[4].Output')" + [[ "${hc_status}" == '"healthy"' ]] && hc_healthy_streak=$((hc_healthy_streak + 1)) + [[ "${hc_status}" == '"unhealthy"' ]] && hc_failed_streak=$((hc_failed_streak + 1)) + repeat_str '*' "${hc_healthy_streak}" + # shellcheck disable=SC2154 + hc_streak_bar="${_term_green}${__repeat_str}${_term_reset}" + repeat_str '-' $(( POST_BUILD_HC_MIN_HEALTHY_STREAK - hc_healthy_streak )) + hc_streak_bar="${hc_streak_bar}${__repeat_str}" + hc_itr=0 + fi + status_tmpl="health-check status: " + # shellcheck disable=SC2154 + status_tmpl="${status_tmpl}${_term_yellow}[${_term_reset}up: %ss${_term_yellow}]-${_term_reset}" + status_tmpl="${status_tmpl}${_term_yellow}[${_term_reset}next: %ss${_term_yellow}]-${_term_reset}" + status_tmpl="${status_tmpl}${_term_yellow}[${_term_reset}%s${_term_yellow}]-${_term_reset}" + status_tmpl="${status_tmpl}${_term_yellow}[${_term_reset}%s${_term_yellow}]${_term_reset}" + # shellcheck disable=SC2059 + printf -v _status_msg "${status_tmpl}" \ + "${hc_current_duration}" \ + $(( POST_BUILD_HC_INTERVAL - hc_itr )) \ + "${hc_status}" \ + "${hc_streak_bar}" + [[ "${_is_terminal}" == 'true' ]] && status_with_spinner "${_status_msg}" + [[ ${hc_healthy_streak} -ge ${POST_BUILD_HC_MIN_HEALTHY_STREAK} ]] && break + hc_itr=$(( hc_itr + 1 )) + hc_current_duration=$(( 
hc_current_duration + 1 )) + sleep 1 + done + rm_trap_fn 'handle_hc_container_run' + + stop_container "${container_name}" 'false' + # shellcheck disable=SC2154 + msg "${_term_cup}${_term_ceol}${_term_cup}" + [[ ${hc_healthy_streak} -lt ${POST_BUILD_HC_MIN_HEALTHY_STREAK} ]] && \ + die "health-check failed: timeout after ${POST_BUILD_HC_MAX_DURATION}s. docker inspect log:\n${hc_log}" + fi + # shellcheck disable=SC2154 + msg "${_term_cup}" + msg_ok "done." +} + +function handle_hc_container_run() { + local container_id + container_id="${_handle_hc_container_run_args}" + echo -e "" + msg_error "aborting.. stopping health-check test container ${container_id}" + container_exists "${container_id}" && stop_container "${container_id}" 'false' } # Check if container exists for given container_name, exit with signal 3 if it does not @@ -231,22 +378,25 @@ function container_exists() { return 0 } -# Check if image exists, remove existing image depending on passed rebuild args. (-f, -F, -c, -C) -# Returns signal 0 if image exists, or signal 3 if not/image was removed due to rebuild args. +# Check if image exists, remove existing image depending on passed build args. (-f, -F, -c, -C) +# Returns signal 0 if image exists, or signal 3 if not or image was removed due to build args. # # Arguments: # 1: image_id (i.e. kubler/busybox) # 2: image_type - $_IMAGE_PATH or $_BUILDER_PATH, optional, default: $_IMAGE_PATH # 3: image_tag - optional, default: $IMAGE_TAG -function image_exists_or_rm { +function image_exists_or_rm() { local image_id image_type image_id="${1}" image_type="${2:-${_IMAGE_PATH}}" image_tag="${3:-${IMAGE_TAG}}" image_exists "${image_id}" "${image_tag}" || return $? 
# shellcheck disable=SC2154 - if [[ "${_arg_clear_everything}" == 'on' && "${image_id}" != "${_STAGE3_NAMESPACE}/portage" ]]; then - # -C => nuke everything except portage + if [[ "${_arg_clear_everything}" == 'on' ]] \ + && [[ "${image_id}" != "${_PORTAGE_IMAGE}" || "${KUBLER_PORTAGE_GIT}" != 'true' ]] + then + [[ "${image_id}" == "${_PORTAGE_IMAGE}" ]] && stop_container "${_PORTAGE_CONTAINER}" + # -C remove_image "${image_id}" "${image_tag}" return 3 elif [[ "${_arg_clear_build_container}" == 'on' && "${image_type}" == "${_BUILDER_PATH}" ]]; then @@ -256,8 +406,9 @@ function image_exists_or_rm { return 3 fi elif [[ "${_arg_force_image_build}" == 'on' || "${_arg_force_full_image_build}" == 'on' ]]; then - # -f, -F => rebuild image if not a builder - [[ "${image_type}" != "${_BUILDER_PATH}" ]] && remove_image "${image_id}" "${image_tag}" && return 3 + # -f, -F => rebuild image if not a builder or portage + [[ "${image_type}" != "${_BUILDER_PATH}" && "${image_id}" != "${_PORTAGE_IMAGE}" ]] \ + && remove_image "${image_id}" "${image_tag}" && return 3 fi return 0 } @@ -300,12 +451,15 @@ function get_image_size() { # 2: container_host_name # 3: remove container after it exists, optional, default: true # 4: container_name, optional, keep in mind that this needs to be unique for all existing containers on the host +# 5: exit_on_error, optional, if false will just return the exit signal instead of aborting, default: true function run_image() { - local image_id container_host_name auto_rm container_name docker_env denv docker_mounts dmnt + local image_id container_host_name auto_rm container_name exit_on_error docker_env denv docker_mounts dmnt image_id="$1" container_host_name="$2" auto_rm="${3:-true}" - container_name="${4:-${image_id//\//-}-${$}-${RANDOM}}" + container_name="${4:-${IMAGE_TAG}}" + container_name="${container_name//[\:\/]/-}" + exit_on_error="${5:-true}" # docker env options docker_env=() for denv in "${_container_env[@]}"; do @@ -324,68 +478,130 @@ 
function run_image() { [[ "${_container_mount_portage}" == "true" ]] && docker_args+=("--volumes-from" "${_PORTAGE_IMAGE//\//-}") # shellcheck disable=SC2154 [[ ${#_container_args[@]} -gt 0 ]] && docker_args+=("${_container_args[@]}") + local exit_sig # shellcheck disable=SC2064 - trap "handle_container_run ${container_name}" EXIT - "${DOCKER}" run "${docker_args[@]}" "${docker_mounts[@]}" "${docker_env[@]}" "${image_id}" "${_container_cmd[@]}" \ - || die "Failed to run image ${image_id}" - trap - EXIT + _handle_container_run_args="${container_name}" + add_trap_fn 'handle_container_run' + "${DOCKER}" run "${docker_args[@]}" "${docker_mounts[@]}" "${docker_env[@]}" "${image_id}" "${_container_cmd[@]}" + exit_sig=$? + [[ ${exit_sig} -ne 0 && "${exit_on_error}" == 'true' ]] && die "Failed to run image ${image_id}" + rm_trap_fn 'handle_container_run' + return ${exit_sig} } -# 1: container_id +# Trap handler for run_image fn. function handle_container_run() { local container_id - container_id="$1" + container_id="${_handle_container_run_args}" if [[ -z "${NO_CLEANUP}" ]] && container_exists "${container_id}"; then - msg "--> remove ${container_id}, NO_CLEANUP env prevents this" + msg_error "removing ${container_id}, NO_CLEANUP env prevents this" "${DOCKER}" rm "${container_id}" 1> /dev/null fi } +# Arguments: +# 1: container_name - the container to stop +# 2: remove_container - optional, if true also removes the container, default: true +function stop_container() { + local container_name remove_container exit_sig + container_name="$1" + remove_container="${2:-true}" + "${DOCKER}" stop "${container_name}" 1> /dev/null + exit_sig=$? + [[ "${remove_container}" == 'true' ]] && { "${DOCKER}" rm "${container_name}" 1> /dev/null; exit_sig=$?; } + return "${exit_sig}" +} + # Docker import a portage snapshot as given portage_image_id # # Arguments: # 1: portage_image_id (i.e. bob/portage) # 2: image_tag (a.k.a. 
version) function import_portage_tree() { - local image_id image_tag portage_tmp_file + local image_id image_tag image_path portage_file portage_tmp_file image_id="$1" image_tag="$2" - image_exists "${image_id}" "${image_tag}" && return 0 + image_exists_or_rm "${image_id}" "${image_tag}" && return 0 + + # add current image id to output logging + add_status_value 'portage' + + _status_msg="download portage snapshot" + PORTAGE_DATE="${PORTAGE_DATE:-latest}" + portage_file="portage-${PORTAGE_DATE}.tar.xz" + _pwrap_callback=( 'cb_add_filesize_to_status' "${KUBLER_DOWNLOAD_DIR}/${portage_file//latest/${_TODAY}}" ) + pwrap download_portage_snapshot "${portage_file}" || die "Failed to download portage snapshot ${portage_file}" + + portage_file="${portage_file//latest/${_TODAY}}" - download_portage_snapshot || die "Failed to download portage snapshot" + image_path="${KUBLER_DATA_DIR}"/tmp/kubler-portage + [[ ! -d "${image_path}" ]] && mkdir -p "${image_path}" + cp "${_KUBLER_DIR}"/lib/bob-portage/Dockerfile.template "${image_path}"/ - msg "--> bootstrap ${image_id}" + add_trap_fn 'handle_import_portage_tree_error' # shellcheck disable=SC2154 - portage_tmp_file="${_KUBLER_DIR}/lib/bob-portage/${_portage_file}" - cp "${DOWNLOAD_PATH}/${_portage_file}" "${_KUBLER_DIR}/lib/bob-portage/" - export BOB_CURRENT_PORTAGE_FILE=${_portage_file} - - generate_dockerfile "${_KUBLER_DIR}/lib/bob-portage/" - "${DOCKER}" build -t "${image_id}:${PORTAGE_DATE}" "${_KUBLER_DIR}/lib/bob-portage/" || die "failed to tag" - rm "${_KUBLER_DIR}"/lib/bob-portage/Dockerfile "${portage_tmp_file}" - "${DOCKER}" tag "${image_id}:${PORTAGE_DATE}" "${image_id}:latest" || die "failed to tag" + portage_tmp_file="${image_path}/${portage_file}" + cp "${KUBLER_DOWNLOAD_DIR}/${portage_file}" "${portage_tmp_file}" + export BOB_CURRENT_PORTAGE_FILE="${portage_file}" + + _status_msg="bootstrap ${image_id} image" + generate_dockerfile "${image_path}" + pwrap "${DOCKER}" build -t "${image_id}:${image_tag}" 
"${image_path}" \ + || die "Failed to build ${image_id}:${image_tag}" + rm_trap_fn 'handle_import_portage_tree_error' + rm -r "${image_path}" + unset PORTAGE_DATE + _status_msg="tag image ${image_id}:latest" + pwrap "${DOCKER}" tag "${image_id}:${image_tag}" "${image_id}:latest" \ + || die "Failed to tag ${image_id}:${image_tag}" + _portage_image_processed='true' } +function handle_import_portage_tree_error() { + [[ -d "${KUBLER_DATA_DIR}"/tmp/kubler-portage ]] && rm -r "${KUBLER_DATA_DIR}"/tmp/kubler-portage +} # Docker import a stage3 tar ball for given stage3_image_id # # Arguments: # 1: stage3_image_id (i.e. bob/${STAGE3_BASE}) function import_stage3() { - local image_id cat_bin + local image_id cat_bin stage3_file image_id="${1//+/-}" - image_exists_or_rm "${image_id}" "${_BUILDER_PATH}" "${STAGE3_DATE}" && return 0 - download_stage3 || die "failed to download stage3 files" + fetch_stage3_archive_name || die "Couldn't find a stage3 file for ${ARCH_URL}" # shellcheck disable=SC2154 - msg "--> import ${image_id}:${STAGE3_DATE} using ${_stage3_file}" - cat_bin='bzcat' - [[ "${_stage3_file##*.}" == 'xz' ]] && cat_bin='xzcat' - "${cat_bin}" < "${DOWNLOAD_PATH}/${_stage3_file}" | bzip2 | "${DOCKER}" import - "${image_id}:${STAGE3_DATE}" \ - || die "failed to import ${_stage3_file}" + stage3_file="${__fetch_stage3_archive_name}" - msg "tag ${image_id}:latest" - "${DOCKER}" tag "${image_id}:${STAGE3_DATE}" "${image_id}:latest" || die "failed to tag" + image_exists_or_rm "${image_id}" "${_BUILDER_PATH}" "${STAGE3_DATE}" && return 0 + + _status_msg="download ${stage3_file}" + # shellcheck disable=SC2034 + _pwrap_callback=( 'cb_add_filesize_to_status' "${KUBLER_DOWNLOAD_DIR}/${stage3_file}" ) + pwrap download_stage3 "${stage3_file}" || die "Failed to download stage3 file" + + _status_msg="import ${stage3_file}" + pwrap import_tarball "${KUBLER_DOWNLOAD_DIR}/${stage3_file}" "${image_id}:${STAGE3_DATE}" \ + || die "Failed to import ${stage3_file}" + + _status_msg="tag 
${image_id}:latest" + pwrap "${DOCKER}" tag "${image_id}:${STAGE3_DATE}" "${image_id}:latest" || die "Failed to tag ${image_id}:latest" +} + +# Create a new Docker image from a file archive. +# +# Arguments: +# +# 1: tarball_path - file to import, xz and bz only +# 2: image_id - docker image id for the new image +function import_tarball() { + local tarball_path image_id cat_bin + tarball_path="$1" + image_id="$2" + cat_bin='bzcat' + [[ "${tarball_path##*.}" == 'xz' ]] && cat_bin='xzcat' + "${cat_bin}" < "${tarball_path}" | bzip2 | "${DOCKER}" import - "${image_id}" \ + || return 1 } # This function is called once per stage3 build container and should @@ -396,32 +612,48 @@ function import_stage3() { function build_core() { local builder_id core_id image_path builder_id="$1" - import_portage_tree "${_PORTAGE_IMAGE}" "${PORTAGE_DATE}" + core_id="${builder_id}-core" + + # when -C is active this might get called multiple times for a build + [[ "${_portage_image_processed}" == 'false' ]] && import_portage_tree "${_PORTAGE_IMAGE}" "${PORTAGE_DATE}" # ensure the portage container is created container_exists "${_PORTAGE_CONTAINER}" [[ $? -eq 3 ]] && - msg "--> create portage container, this may take a few moments.. " && \ - "${DOCKER}" run '--name' "${_PORTAGE_CONTAINER}" "${_PORTAGE_IMAGE}" true + _status_msg="create the portage container" && \ + pwrap "${DOCKER}" run '--name' "${_PORTAGE_CONTAINER}" "${_PORTAGE_IMAGE}" true + + # add current image id to output logging + add_status_value "${core_id}" BOB_CURRENT_STAGE3_ID="${_STAGE3_NAMESPACE}/${STAGE3_BASE//+/-}" import_stage3 "${BOB_CURRENT_STAGE3_ID}" - core_id="${builder_id}-core" image_exists_or_rm "${core_id}" "${_BUILDER_PATH}" && return 0 expand_image_id "${core_id}" "${_BUILDER_PATH}" - get_abs_ns_path "${__expand_image_id}" - image_path="${__get_abs_ns_path}" - mkdir -p "${image_path}" + # shellcheck disable=SC2154 + image_path="${__expand_image_id}" + [[ ! 
-d "${image_path}" ]] && { mkdir -p "${image_path}" || die; } + _handle_build_core_error_args="${image_path}" + add_trap_fn 'handle_build_core_error' # copy build-root.sh and emerge defaults so we can access it via dockerfile context - cp -r "${_KUBLER_DIR}"/lib/bob-core/{*.sh,etc,Dockerfile.template} "${image_path}/" + cp -r "${_KUBLER_DIR}"/lib/bob-core/{*.sh,etc,Dockerfile.template} "${image_path}/" \ + || die "Could not create temporary image at ${image_path}" - generate_dockerfile "${__expand_image_id}" - build_image "${builder_id}-core" "${_BUILDER_PATH}" + generate_dockerfile "${image_path}" + build_image "${core_id}" "${image_path}" "${_BUILDER_PATH}" 'true' + rm_trap_fn 'handle_build_core_error' # clean up rm -r "${image_path}" + +} + +function handle_build_core_error() { + local image_path + image_path="${_handle_build_core_error_args}" + [[ -d "${image_path}" ]] && rm -r "${image_path}" } # Produces a build container image for given builder_id @@ -429,22 +661,26 @@ function build_core() { # # Arguments: # 1: builder_id (i.e. kubler/bob) +# 2: image_path function build_builder() { - local builder_id + local builder_id image_path builder_id="$1" + image_path="$2" # bootstrap a stage3 image if defined in build.conf [[ -n "${STAGE3_BASE}" ]] && build_core "${builder_id}" - build_image "${builder_id}" "${_BUILDER_PATH}" + build_image "${builder_id}" "${image_path}" "${_BUILDER_PATH}" } # Called when using --no-deps, in most cases a thin wrapper to build_image() # # Arguments: # 1: image_id (i.e. kubler/busybox) +# 2: image_path function build_image_no_deps() { - local image_id + local image_id image_path image_id="$1" - build_image "${image_id}" + image_path="$2" + build_image "${image_id}" "${image_path}" } # Sets __get_build_container to the builder_id required for building given image_id or signal 3 if not found/implemented. 
@@ -466,13 +702,14 @@ function get_build_container() { # shellcheck disable=SC2154 source_image_conf "${__expand_image_id}" fi - # get parent image basename - parent_image="${IMAGE_PARENT##*/}" - parent_ns="${IMAGE_PARENT%%/*}" + if [[ -n "${BUILDER}" ]]; then # BUILDER was set for this image, override default and start with given base builder from this image on build_container="${BUILDER}" elif [[ "${image_type}" == "${_IMAGE_PATH}" ]]; then + # get parent image basename + parent_image="${IMAGE_PARENT##*/}" + parent_ns="${IMAGE_PARENT%%/*}" builder_image="${build_container##*/}" [[ "${parent_image}" != "scratch" ]] && image_exists "${parent_ns}/${builder_image}-${parent_image}" \ && build_container="${parent_ns}/${builder_image}-${parent_image}" @@ -492,9 +729,10 @@ function push_auth() { local namespace repository_url login_args namespace="$1" repository_url="$2" + add_status_value 'auth' if [[ -z "${repository_url}" ]]; then DOCKER_LOGIN="${DOCKER_LOGIN:-${namespace}}" - msg "--> using docker.io/u/${DOCKER_LOGIN}" + msg_info "using docker.io/u/${DOCKER_LOGIN}" login_args=('-u' "${DOCKER_LOGIN}") # shellcheck disable=SC2153 if [[ -n "${DOCKER_PW}" ]]; then @@ -502,7 +740,7 @@ function push_auth() { fi "${DOCKER}" login "${login_args[@]}" || exit 1 else - msg "--> using ${repository_url}" + msg_info "using ${repository_url}" fi } @@ -521,9 +759,11 @@ function push_image() { # shellcheck disable=SC2181 [[ $? 
-ne 0 ]] && die "Couldn't determine image id for ${image_id}:${image_tag}: ${docker_image_id}" push_id="${repository_url}/${image_id}" - msg "${DOCKER}" tag "${docker_image_id}" "${push_id}" - "${DOCKER}" tag "${docker_image_id}" "${push_id}" || exit 1 + _status_msg="${DOCKER}" tag "${docker_image_id}" "${push_id}" + pwrap "${DOCKER}" tag "${docker_image_id}" "${push_id}" || die fi - msg "--> pushing ${push_id}" - "${DOCKER}" push "${push_id}" || exit 1 + add_status_value "${push_id}" + _status_msg="upload image" + pwrap "${DOCKER}" push "${push_id}" || die + msg_ok "done." } diff --git a/lib/engine/dummy.sh b/lib/engine/dummy.sh index 63ecef33..f83bbc7a 100644 --- a/lib/engine/dummy.sh +++ b/lib/engine/dummy.sh @@ -1,7 +1,11 @@ #!/usr/bin/env bash -# Copyright (c) 2014-2017, Erik Dannenberg +# Copyright (c) 2014-2019, Erik Dannenberg # All rights reserved. +# A bare bone implementation of the kubler build "interface" that can be used as a template for new engines. +# You can also extend an existing engine and just override a few functions as needed, i.e.: +#source "${_LIB_DIR}/engine/docker.sh" + ### ### REQUIRED FUNCTIONS ### @@ -37,7 +41,7 @@ function build_image() { image_type="${2:-${_IMAGE_PATH}}" msg "building dummy image: ${image_id}" # finish PACKAGES.md when using build-root.sh once the build is done: - #add_documentation_header "${image_id}" "${_IMAGE_PATH}" || die "failed to generate PACKAGES.md for ${image_id}" + #add_documentation_header "${image_id}" "${_IMAGE_PATH}" || die "Failed to generate PACKAGES.md for ${image_id}" } # Exits with signal 0 if given image_id has a built and ready to run image or signal 3 if not. 
diff --git a/lib/kubler-completion.bash b/lib/kubler-completion.bash new file mode 100755 index 00000000..b1671af9 --- /dev/null +++ b/lib/kubler-completion.bash @@ -0,0 +1,178 @@ +#!/usr/bin/env bash + +# Check if current compwords is in passed word list +# Arguments: +# 1: words_to_check +function _bc_kubler_find_in_compwords() { + ___bc_kubler_find_in_compwords= + local word subcommand c + c=1 + while [[ $c -lt ${COMP_CWORD} ]]; do + word="${COMP_WORDS[c]}" + for subcommand in $1; do + if [[ "${subcommand}" == "${word}" ]]; then + ___bc_kubler_find_in_compwords="${subcommand}" + return + fi + done + ((c++)) + done +} + +# Returns an array with all matches for given $pattern and $text. +# Adapted from: http://regexraptor.net/downloads/return_all_matches.sh +# +# Arguments: +# 1: pattern +# 2: text +function _bc_kubler_match_all() { + ___bc_kubler_match_all= + local pattern text text_step r_match loop_max regex_computed + pattern="$1" + text="$2" + + text_step="${text}" + r_match[0]="" + i=0 + loop_max="${#text}" + for (( ; ; )); do + [[ "${text_step}" =~ $pattern ]] + regex_computed="${BASH_REMATCH[0]}" + [[ "${#BASH_REMATCH[*]}" -eq 0 ]] && break + r_match[i]="${BASH_REMATCH[1]}" + text_step="${text_step#*$regex_computed}" + i=$(( i+1 )) + [[ ${i} -gt ${loop_max} ]] && break + done + ___bc_kubler_match_all=("${r_match[@]}") +} + +# Init global vars $_bc_kubler_cmds and $_bc_kubler__opts by parsing kubler's help output +function _bc_kubler_init() +{ + local regex_cmds help_output command cmd_opts + _bc_kubler_init_ns_vars + regex_cmds=',(--[a-z0-9-]*):' + for command in ${_bc_kubler_cmds[@]}; do + help_output="$(kubler "${command}" --help)" + _bc_kubler_match_all "${regex_cmds}" "${help_output}" + cmd_opts="${___bc_kubler_match_all[@]}" + declare -g _bc_kubler_cmd_"${command//-/_}"_opts="${cmd_opts}" + done +} + +# Init namespace related global vars that need to be refreshed on each new completion +function _bc_kubler_init_ns_vars() { + local help_output 
parsed_help + _bc_kubler_dir= + _bc_kubler_working_dir= + _bc_kubler_ns_type= + help_output="$(KUBLER_BC_HELP=true kubler --help)" + readarray -t parsed_help <<< "${help_output}" + _bc_kubler_dir="${parsed_help[0]}" + _bc_kubler_working_dir="${parsed_help[1]}" + _bc_kubler_ns_type="${parsed_help[2]}" + _bc_kubler_ns_default="${parsed_help[3]}" + readarray -t _bc_kubler_cmds <<< "${parsed_help[4]}" +} + +# complete a kubler namespace +function _bc_kubler_comp_namespace() { + if [[ "${_bc_kubler_ns_type}" == 'single' ]]; then + ___bc_kubler_comp_namespace="$(basename -- "${_bc_kubler_working_dir}")/ $(find "${_bc_kubler_dir}/namespaces" -maxdepth 1 -mindepth 1 -type d ! -name '.*' -printf '%f/\n')" + else + ___bc_kubler_comp_namespace="$(find "${_bc_kubler_working_dir}" "${_bc_kubler_dir}/namespaces" -maxdepth 1 -mindepth 1 -type d ! -name '.*' -printf '%f/\n')" + fi +} + +# complete a kubler image id +function _bc_kubler_comp_image() { + local namespace current_image_path + namespace="${COMP_WORDS[COMP_CWORD]%/*}" + if [[ "${namespace}" == 'kubler' ]]; then + current_image_path="${_bc_kubler_dir}/namespaces/${namespace}/images" + else + if [[ "${_bc_kubler_ns_type}" == 'single' && "${namespace}" == "${_bc_kubler_ns_default}" ]]; then + current_image_path="${_bc_kubler_working_dir}/images" + else + if [[ -d "${_bc_kubler_working_dir}/${namespace}/images" ]]; then + current_image_path="${_bc_kubler_working_dir}/${namespace}/images" + else + current_image_path="${_bc_kubler_dir}/namespaces/${namespace}/images" + fi + fi + fi + ___bc_kubler_comp_image="$(find "${current_image_path}" -maxdepth 1 -mindepth 1 -type d ! 
-name '.*' -printf '%f ')" +} + +function _kubler() +{ + local cur_comp prev kubler_global_opts kubler_cmds current_opts regex_cmds regex_opts current_cmd cmd_opts + COMPREPLY=() + cur_comp="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + kubler_global_opts="--help --debug --working-dir --verbose" + + _bc_kubler_init_ns_vars + + # check for a completed kubler command + _bc_kubler_find_in_compwords "${_bc_kubler_cmds}" + if [[ -z "${___bc_kubler_find_in_compwords}" ]]; then + case "${cur_comp}" in + # complete global args + --*) COMPREPLY=( $(compgen -W "${kubler_global_opts}" -- ${cur_comp}) ) + ;; + # complete commands + *) if [[ "${_bc_kubler_ns_type}" == 'none' ]];then + # only new command allowed if we are not in a kubler ns dir + COMPREPLY=( $(compgen -W "new" -- ${cur_comp}) ) + else + COMPREPLY=( $(compgen -W "${_bc_kubler_cmds}" -- ${cur_comp}) ) + fi + ;; + esac + return + else + # handle various cases for completed commands/args + current_cmd="${___bc_kubler_find_in_compwords}" + case "${current_cmd}:${cur_comp}" in + *:--*) current_opts="_bc_kubler_cmd_${current_cmd//-/_}_opts"; + COMPREPLY=( $(compgen -W "${!current_opts}" -- ${cur_comp}) ) + ;; + build:*/*) _bc_kubler_comp_image + cur_comp="${cur_comp#*/}" + COMPREPLY=( $(compgen -P "${COMP_WORDS[COMP_CWORD]%/*}/" -W "${___bc_kubler_comp_image}" -- ${cur_comp}) ) + ;; + clean:*) [[ "${prev}" == '-i' || "${prev}" == '--image-ns' ]] && \ + _bc_kubler_comp_namespace && compopt -o nospace && COMPREPLY=( $(compgen -W "${___bc_kubler_comp_namespace}" -- ${cur_comp}) ) + ;; + push:*/*) _bc_kubler_comp_image + cur_comp="${cur_comp#*/}" + COMPREPLY=( $(compgen -P "${COMP_WORDS[COMP_CWORD]%/*}/" -W "${___bc_kubler_comp_image}" -- ${cur_comp}) ) + ;; + dep-graph:*/*) + _bc_kubler_comp_image + cur_comp="${cur_comp#*/}" + COMPREPLY=( $(compgen -P "${COMP_WORDS[COMP_CWORD]%/*}/" -W "${___bc_kubler_comp_image}" -- ${cur_comp}) ) + ;; + new:*) cmd_opts='builder image namespace' + [[ 
"${_bc_kubler_ns_type}" == 'none' ]] && cmd_opts='namespace' + [[ "${_bc_kubler_ns_type}" == 'single' ]] && cmd_opts='builder image' + if [[ "${prev}" == 'image' || "${prev}" == 'builder' ]]; then + _bc_kubler_comp_namespace && compopt -o nospace && COMPREPLY=( $(compgen -W "${___bc_kubler_comp_namespace}" -- ${cur_comp}) ) + else + _bc_kubler_find_in_compwords "${cmd_opts}" + [[ -z "${___bc_kubler_find_in_compwords}" ]] && COMPREPLY=( $(compgen -W "${cmd_opts}" -- ${cur_comp}) ) + fi + ;; + *:*) [[ "${current_cmd}" = 'build' || "${current_cmd}" = 'dep-graph' || "${current_cmd}" = 'push' ]] && \ + _bc_kubler_comp_namespace && compopt -o nospace && COMPREPLY=( $(compgen -W "${___bc_kubler_comp_namespace}" -- ${cur_comp}) ) + ;; + esac + fi + return +} + +_bc_kubler_init + +complete -F _kubler kubler kubler.sh diff --git a/lib/template/docker/builder/build.conf b/lib/template/docker/builder/build.conf index f063acfe..753fc435 100644 --- a/lib/template/docker/builder/build.conf +++ b/lib/template/docker/builder/build.conf @@ -12,7 +12,7 @@ BUILDER_CAPS_SYS_PTRACE=true # Mount a host directory in the build container during the build, uses standard Docker -v syntax, default: unset/none # !! There is a reason Docker does not allow this, consider the consequences regarding build repeatability !! 
-#BUILDER_MOUNTS=("${_KUBLER_DIR}/tmp/somepath:/path/in/builder:ro") +#BUILDER_MOUNTS=("${KUBLER_DATA_DIR}/tmp/somepath:/path/in/builder:ro") # Use BUILDER_MOUNTS from parent image(s)?, default: false #PARENT_BUILDER_MOUNTS='true' @@ -24,7 +24,7 @@ BOB_CHOST="x86_64-pc-linux-gnu" BOB_CFLAGS="${BOB_CFLAGS:--mtune=generic -O2 -pipe}" BOB_CXXFLAGS="${BOB_CXXFLAGS:-${BOB_CFLAGS}}" -# active in configure_bob() hook, generally only differs when using crossdev +# active in configure_builder() hook, generally only differs when using crossdev BOB_BUILDER_CHOST="${BOB_BUILDER_CHOST:-${BOB_CHOST}}" BOB_BUILDER_CFLAGS="${BOB_BUILDER_CFLAGS:-${BOB_CFLAGS}}" BOB_BUILDER_CXXFLAGS="${BOB_BUILDER_CXXFLAGS:-${BOB_CXXFLAGS}}" diff --git a/lib/template/docker/builder/build_ext.sh b/lib/template/docker/builder/build_ext.sh new file mode 100644 index 00000000..fb793492 --- /dev/null +++ b/lib/template/docker/builder/build_ext.sh @@ -0,0 +1,25 @@ +# +# Kubler build container config, pick installed packages and/or customize the build +# + +# +# This hook can be used to configure the build container itself, install packages, run any command, etc +# +configure_builder() { + # Useful helpers + + # Update a Gentoo package use flag.. 
+ #update_use 'dev-libs/some-lib' '+feature' '-some_other_feature' + + # ..or a Gentoo package keyword + #update_keywords 'dev-lang/some-package' '+~amd64' + + # Download file at url to /distfiles if it doesn't exist yet, file name is derived from last url fragment + #download_file "$url" + #echo "${__download_file}" + # Same as above but set a custom file name + #download_file "$url" my_file_v1.tar.gz + # Same as above but pass arbitrary additional args to curl + #download_file "$url" my_file_v1.tar.gz '-v' '--cookie' 'foo' + : +} diff --git a/lib/template/docker/builder/build.sh b/lib/template/docker/builder/build_stage3.sh similarity index 76% rename from lib/template/docker/builder/build.sh rename to lib/template/docker/builder/build_stage3.sh index e2a9ca2a..a2c9791a 100644 --- a/lib/template/docker/builder/build.sh +++ b/lib/template/docker/builder/build_stage3.sh @@ -1,20 +1,20 @@ # -# Build config, sourced by build-root.sh inside build container +# Build config, sourced by build-root.sh in the build container # # # This hook can be used to configure the build container itself, install packages, run any command, etc # -configure_bob() { - ### example for a stage3 builder setup, not required when extending an existing build container, like `kubler/bob` +configure_builder() { + ### example for a stage3 builder setup that should work out of the box on x86_64 and glibc stage3 fix_portage_profile_symlink # install basics used by helper functions + eselect news read new 1> /dev/null emerge app-portage/flaggie app-portage/eix app-portage/gentoolkit configure_eix mkdir -p /etc/portage/package.{accept_keywords,unmask,mask,use} touch /etc/portage/package.accept_keywords/flaggie # set locale of build container - # note: locale-gen is not supported when using a musl based stage3 echo 'en_US.UTF-8 UTF-8' >> /etc/locale.gen locale-gen echo 'LANG="en_US.UTF-8"' > /etc/env.d/02locale @@ -31,8 +31,9 @@ configure_bob() { update_keywords 'app-portage/layman' '+~amd64' 
update_keywords 'dev-python/ssl-fetch' '+~amd64' update_keywords 'app-admin/su-exec' '+~amd64' - emerge dev-vcs/git app-portage/layman sys-devel/distcc app-misc/jq app-shells/bash-completion + emerge dev-vcs/git app-portage/layman app-misc/jq app-shells/bash-completion install_git_postsync_hooks configure_layman + add_overlay kubler https://github.com/edannenberg/kubler-overlay.git emerge dev-lang/go } diff --git a/lib/template/docker/image/Dockerfile.template b/lib/template/docker/image/Dockerfile.template index 7b238964..d683fafa 100644 --- a/lib/template/docker/image/Dockerfile.template +++ b/lib/template/docker/image/Dockerfile.template @@ -3,4 +3,7 @@ LABEL maintainer ${MAINTAINER} ADD rootfs.tar / +COPY docker-healthcheck.sh /usr/bin/docker-healthcheck +HEALTHCHECK --interval=60s --timeout=5s --start-period=5s --retries=3 CMD ["docker-healthcheck"] + #CMD ["/bin/some-cmd", "--some-option", "some-value"] diff --git a/lib/template/docker/image/build-test.sh b/lib/template/docker/image/build-test.sh new file mode 100644 index 00000000..ff231f3c --- /dev/null +++ b/lib/template/docker/image/build-test.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh +set -eo pipefail + +# Do some tests and exit with either 0 for success or 1 for error +echo -e "Fix me or this built-test.sh will never succeed!" 
+false || exit 1 + +exit 0 diff --git a/lib/template/docker/image/build.conf b/lib/template/docker/image/build.conf index 95997c9e..babf6fe4 100644 --- a/lib/template/docker/image/build.conf +++ b/lib/template/docker/image/build.conf @@ -1,12 +1,12 @@ -# Used build container, optional, default: value of DEFAULT_BUILDER of namespace kubler.conf -#BUILDER="kubler/bob" +# Used build container, optional, default: value of DEFAULT_BUILDER in kubler.conf +#BUILDER="${_tmpl_image_builder}" # Run build container with `--cap-add SYS_PTRACE`, optional, default: false -#BUILDER_CAPS_SYS_PTRACE=true +#BUILDER_CAPS_SYS_PTRACE='true' # Mount a host directory in the build container during the build, uses standard Docker -v syntax, default: unset/none # !! There is a reason Docker does not allow this, consider the consequences regarding build repeatability !! -#BUILDER_MOUNTS=("${_KUBLER_DIR}/tmp/somepath:/path/in/builder:ro") +#BUILDER_MOUNTS=("${KUBLER_DATA_DIR}/tmp/somepath:/path/in/builder:ro") # Use BUILDER_MOUNTS from parent image(s)?, default: false #PARENT_BUILDER_MOUNTS='true' @@ -15,3 +15,23 @@ # Fully qualified image id (i.e. kubler/busybox), optional, default: scratch IMAGE_PARENT="${_tmpl_image_parent}" + +# Run a standard Docker health-check test as part of the build process. Add the health check as usual in +# Dockerfile.template and set this to true. Official docs: https://docs.docker.com/engine/reference/builder/#healthcheck +POST_BUILD_HC=true +# The health-check will only pass if the container reported healthy for this many tries +#POST_BUILD_HC_MIN_HEALTHY_STREAK=5 +# Timeout for the complete health-check test before it is aborted with an error, START_PERIOD will not count to this limit +#POST_BUILD_HC_MAX_DURATION=30 +# +# Any health-check args in the Dockerfile are overridden with the corresponding values below for the duration of +# the test. If not defined the Kubler internal defaults, as seen below, are used. 
+# +# Run the health-check command every n sec +#POST_BUILD_HC_INTERVAL=5 +# Timeout for each health check in secs +#POST_BUILD_HC_TIMEOUT=5 +# Grace period in secs for the container to get ready before any checks are run +#POST_BUILD_HC_START_PERIOD=3 +# Amount of health-check fails for a container before it considers itself unhealthy +#POST_BUILD_HC_RETRY=3 diff --git a/lib/template/docker/image/build.sh b/lib/template/docker/image/build.sh index 6dc0b679..aab78f24 100644 --- a/lib/template/docker/image/build.sh +++ b/lib/template/docker/image/build.sh @@ -18,7 +18,7 @@ _packages="" # # This hook can be used to configure the build container itself, install packages, run any command, etc # -configure_bob() +configure_builder() { # Packages installed in this hook don't end up in the final image but are available for depending image builds #emerge dev-lang/go app-misc/foo diff --git a/lib/template/docker/image/docker-healthcheck.sh b/lib/template/docker/image/docker-healthcheck.sh new file mode 100644 index 00000000..99323398 --- /dev/null +++ b/lib/template/docker/image/docker-healthcheck.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh +set -eo pipefail + +# Do some tests and exit with either 0 for healthy or 1 for unhealthy +echo "Fix me or this docker-healthcheck.sh will never succeed!" 
+false || exit 1 + +exit 0 diff --git a/lib/template/docker/namespace/kubler.conf.multi b/lib/template/docker/namespace/kubler.conf.multi index e98d1063..01f5e189 100644 --- a/lib/template/docker/namespace/kubler.conf.multi +++ b/lib/template/docker/namespace/kubler.conf.multi @@ -1,22 +1,39 @@ AUTHOR="${_tmpl_author} <${_tmpl_author_email}>" # Global tag (a.k.a version) for all images -IMAGE_TAG="${IMAGE_TAG:-${_tmpl_image_tag}}" +IMAGE_TAG="${_tmpl_image_tag}" + +# Ouput related config +# Disable compact output, effectively always passes -v to all commands +#KUBLER_VERBOSE='false' +# If true and compact output is enabled send output to log file instead of /dev/null +#KUBLER_CMD_LOG='true' +# Shall we ring the terminal bell on error? +#KUBLER_BELL_ON_ERROR='true' +# Disabling this only works when set as ENV before starting Kubler +#KUBLER_COLORS='true' + +# Update the portage container via git. Not recommended as it can be quite slow due to the amount of upstream changes. +#KUBLER_PORTAGE_GIT='false' +# Not recommended unless you are building your image stack from scratch and with your own stage3 build containers +#KUBLER_DISABLE_KUBLER_NS='false' +# Effectively always enables -s for the build command +#KUBLER_DISABLE_GPG='false' # Download location for stage3 and Portage files, use whitespace to set multiple servers # You may visit https://www.gentoo.org/downloads/mirrors/ and pick a http or ftp url near your physical location -#MIRROR="${MIRROR:-http://distfiles.gentoo.org/}" +#MIRROR="http://distfiles.gentoo.org/" # Portage snapshot date that is used to bootstrap the portage container -#PORTAGE_DATE="${PORTAGE_DATE:-latest}" +#PORTAGE_DATE='latest' # You can also define these per namespace conf -#BUILD_ENGINE="${BUILD_ENGINE:-${_tmpl_engine}}" -#DEFAULT_BUILDER="${DEFAULT_BUILDER:-kubler/bob}" +#BUILD_ENGINE='${_tmpl_engine}' +#DEFAULT_BUILDER='kubler/bob' # Variables starting with BOB_ are exported as ENV to all build containers # Timezone for build containers 
-#BOB_TIMEZONE="${BOB_TIMEZONE:-UTC}" +#BOB_TIMEZONE='UTC' # Options passed on to the make jobs launched from Portage # -jX = number of cpu cores used for compiling, rule of thumb: amount_of_cores+1, i.e. -j9 -#BOB_MAKEOPTS="${BOB_MAKEOPTS:--j9}" +#BOB_MAKEOPTS='-j9' diff --git a/lib/template/docker/namespace/kubler.conf.single b/lib/template/docker/namespace/kubler.conf.single index 6951edef..41479e27 100644 --- a/lib/template/docker/namespace/kubler.conf.single +++ b/lib/template/docker/namespace/kubler.conf.single @@ -1,3 +1,3 @@ -AUTHOR="${_tmpl_author} <${_tmpl_author_email}>" -DEFAULT_BUILDER="kubler/bob" -BUILD_ENGINE="${_tmpl_engine}" +AUTHOR='${_tmpl_author} <${_tmpl_author_email}>' +DEFAULT_BUILDER='kubler/bob' +BUILD_ENGINE='${_tmpl_engine}' diff --git a/lib/util.sh b/lib/util.sh new file mode 100644 index 00000000..f94d16ab --- /dev/null +++ b/lib/util.sh @@ -0,0 +1,456 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2014-2019, Erik Dannenberg +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +# following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +# disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +# following disclaimer in the documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +KUBLER_VERBOSE="${KUBLER_VERBOSE:-false}" +KUBLER_CMD_LOG="${KUBLER_CMD_LOG:-true}" +KUBLER_COLORS="${KUBLER_COLORS:-true}" +KUBLER_BELL_ON_ERROR="${KUBLER_BELL_ON_ERROR:-true}" + +# terminal output related stuff +_term_red=$(tput setaf 1) +_term_green=$(tput setaf 2) +_term_yellow=$(tput setaf 3) +#_term_blue=$(tput setaf 4) +#_term_magenta=$(tput setaf 5) +_term_cyan=$(tput setaf 6) + +#_term_bold=$(tput bold) +_term_reset=$(tput sgr0) +# clear until eol +_term_ceol=$(tput el) +# cursor 1 line up +_term_cup=$(tput cuu1) + +_is_terminal='true' +# unset color vars if requested or not on terminal +[[ "${KUBLER_COLORS}" == 'false' || ! -t 1 ]] \ + && unset _term_red _term_green _term_yellow _term_blue _term_magenta _term_cyan _term_bold _term_reset +[[ ! 
-t 1 ]] && unset _is_terminal + +# 1 char = 1 frame +_status_spinner='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏' +# printf template, extend/override at will +_def_status_spinner_tmpl="${_term_yellow}»[${_term_reset}%s${_term_yellow}]»" +_status_spinner_tmpl="${_def_status_spinner_tmpl}" +_status_msg='this could be your msg' + +_def_prefix_info="${_term_yellow}»»»»»${_term_reset}" +_def_prefix_info_sub="${_term_yellow}»»»${_term_reset}" +_def_prefix_ok="${_term_yellow}»[${_term_green}✔${_term_yellow}]»${_term_reset}" +_def_prefix_error="${_term_yellow}»[${_term_red}✘${_term_yellow}]»${_term_reset}" +_def_prefix_warn="${_term_yellow}»[${_term_cyan}!${_term_yellow}]»${_term_reset}" +_def_prefix_ask="${_term_yellow}»[${_term_cyan}?${_term_yellow}]»${_term_reset}" + +_prefix_info="${_def_prefix_info}" +_prefix_info_sub="${_def_prefix_info_sub}" +_prefix_ok="${_def_prefix_ok}" +_prefix_error="${_def_prefix_error}" +_prefix_warn="${_def_prefix_warn}" +_prefix_ask="${_def_prefix_ask}" + +# Arguments +# n: message +function msg() { + echo -e "$@" +} + +# Arguments +# n: message +function msg_error() { + msg "${_prefix_error}" "${@}" +} + +# Arguments +# n: message +function msg_info() { + msg "${_prefix_info}" "${@}" +} + +# Arguments +# n: message +function msg_info_sub() { + msg "${_prefix_info_sub}" "${@}" +} + +# Arguments +# n: message +function msg_ok() { + msg "${_prefix_ok}" "${@}" +} + +# Arguments +# n: message +function msg_warn() { + msg "${_prefix_warn}" "${@}" +} + +# printf version of msg(), 20 char padding between prefix and suffix +# +# Arguments: +# 1: msg_prefix +# n: msg_suffix +function msgf() { + local msg_prefix + msg_prefix="$1" + shift + printf "${_prefix_info_sub} %-20s %s\n" "${msg_prefix}" "$@" +} + +# Arguments: +# 1: src_str +# 2: repeat_amount +function repeat_str() { + __repeat_str= + local src_str repeat_amount tmp_str + src_str="$1" + repeat_amount="$2" + [[ ${repeat_amount} -eq 0 ]] && { __repeat_str=""; return; } + printf -v tmp_str '%*s' "${repeat_amount}" '' + # 
shellcheck disable=SC2034
+    __repeat_str="${tmp_str// /${src_str}}"
+}
+
+function is_compact_output_active() {
+    # shellcheck disable=SC2154
+    [[ -z "${_is_terminal}" || "${_arg_verbose}" == 'on' ]] && return 3
+    return 0
+}
+
+# Print status bar with an activity spinner. The spinner moves one frame per call.
+#
+# Arguments:
+# 1: status_msg - text to be displayed
+# 2: callback_name - optional, a function name that is called on each invocation. its output var is echoed
+# 3: callback_args - optional, passed to callback function on execution
+function status_with_spinner() {
+    local status_msg callback_name cb_ret_name callback_output
+    status_msg="$1"
+    [[ -n "$2" ]] && callback_name="$2" && shift 2;
+
+    if [[ -n "${callback_name}" ]] && declare -F "${callback_name}" &>/dev/null; then
+        "${callback_name}" "$@" 1> /dev/null
+        cb_ret_name=__"${callback_name}"
+        callback_output="${!cb_ret_name}"
+    fi
+
+    printf "${_term_cup}${_term_ceol}${_status_spinner_tmpl}${_term_reset} %s\n\r" \
+        "${_status_spinner:x++%${#_status_spinner}:1}" \
+        "${status_msg}${callback_output}"
+    sleep 0.5
+}
+
+# Manipulate the global status bar
+#
+# Arguments:
+# 1: status_value - optional, the value to be added to the global status bar. if empty the bar is reset to its default.
+# 2: append - optional, if true appends the value to the current state, else the status bar defaults are used as base
+function add_status_value() {
+    local status_value append_status status_box
+    status_value="$1"
+    append_status="${2:-false}"
+
+    # shellcheck disable=SC2154,2034
+    [[ -n "${status_value}" ]] \
+        && status_box="${_term_yellow}[${_term_reset}${status_value}${_term_yellow}]»${_term_reset}"
+
+    if [[ "${append_status}" == 'false' ]]; then
+        # shellcheck disable=SC2154,2034
+        _status_spinner_tmpl="${_def_status_spinner_tmpl}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_error="${_def_prefix_error}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_info="${_def_prefix_info}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_ok="${_def_prefix_ok}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_warn="${_def_prefix_warn}${status_box}"
+    else
+        # shellcheck disable=SC2154,2034
+        _status_spinner_tmpl="${_status_spinner_tmpl}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_error="${_prefix_error}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_info="${_prefix_info}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_ok="${_prefix_ok}${status_box}"
+        # shellcheck disable=SC2154,2034
+        _prefix_warn="${_prefix_warn}${status_box}"
+    fi
+}
+
+# Callback for pwrap that adds the size of a given file_path to the status bar.
Usage: +# +# _pwrap_callback=('cb_add_filesize_to_status' "${some_path}") +# pwrap some_command "foo" +# +# 1: file_path +function cb_add_filesize_to_status() { + __cb_add_filesize_to_status= + local file_path file_size + file_path="$1" + file_size='n/a' + get_file_size "${file_path}" 'true' + [[ -n "${__get_file_size}" ]] && file_size="${__get_file_size}" + # shellcheck disable=SC2034 + __cb_add_filesize_to_status=" ${_term_yellow}[${_term_reset} ${file_size} ${_term_yellow}]${_term_reset}" +} + +# Thin wrapper for passed command that prints an activity spinner along with the current value of the global +# var _status_msg for the duration of the command. StdOut of passed command is redirected into the void. If --verbose +# was passed, or if we are not in a terminal, only _status_msg is echoed, no spinner and normal command output. +# +# Arguments: +# 1: command +# n: args +# Return value: exit signal of passed command +function pwrap() { + local exit_sig no_stderr redirect_target + exit_sig=0 + no_stderr= + no_log= + [[ "$1" == 'nostderr' ]] && no_stderr='true' && shift + [[ "$1" == 'nolog' ]] && no_log='true' && shift + msg_info "${_status_msg}" + is_compact_output_active || { "$@"; return $?; } + + redirect_target='/dev/null' + # shellcheck disable=SC2154 + [[ "${KUBLER_CMD_LOG}" == 'true' ]] && redirect_target="${_KUBLER_LOG_DIR}/${_arg_command}.log" + + # launch spinner in bg + # shellcheck disable=SC2154 + while true;do status_with_spinner "${_status_msg}" "${_pwrap_callback[@]}";done & + # save job id + _pwrap_handler_args=$! + add_trap_fn 'pwrap_handler' + # exec passed cmd with disabled stdout + if [[ -z "${no_stderr}" && "${redirect_target}" == '/dev/null' ]] \ + || [[ -z "${no_stderr}" && "${no_log}" == 'true' ]] + then + "$@" 1> /dev/null + exit_sig=$? + elif [[ "${redirect_target}" == '/dev/null' ]] || [[ "${no_log}" == 'true' ]]; then + "$@" &> /dev/null + exit_sig=$? 
+ else + echo "»»» $(date) »»» exec:" "$@" >> "${redirect_target}" + "$@" &>> "${redirect_target}" + exit_sig=$? + fi + # kill job + kill $! + wait $! &> /dev/null + rm_trap_fn 'pwrap_handler' + unset _pwrap_callback _pwrap_handler_args + echo -n "${_term_cup}${_term_ceol}" + return ${exit_sig} +} + +function pwrap_handler() { + [[ -n "${_pwrap_handler_args}" ]] && \ + { kill "${_pwrap_handler_args}" 2> /dev/null; + wait "${_pwrap_handler_args}" &> /dev/null; + unset _pwrap_handler_args _prwap_callback; } +} + +# Read user input displaying given question +# +# Arguments: +# 1: question +# 2: default_value +# Return value: user input or passed default_value +function ask() { + __ask= + local question default_value + question="$1" + default_value="$2" + read -r -p "${_prefix_ask} ${question} (${default_value}): " __ask + [[ -z "${__ask}" ]] && __ask="${default_value}" +} + +# Arguments: +# 1: file_path as string +# 2: error_msg, optional +function file_exists_or_die() { + local file error_msg + file="$1" + error_msg="${2:-couldn\'t read: ${file}}" + [[ -f "${file}" ]] || die "${error_msg}" +} + +# Arguments: +# 1: file_path as string +function file_exists_and_truncate() { + local file error_msg + file="$1" + [[ -f "${file}" ]] && cp /dev/null "${file}" +} + +function sha_sum() { + [[ -n "$(command -v sha512sum)" ]] && echo 'sha512sum' || echo 'shasum -a512' +} + +# Returns 0 if given string contains given word or 3 if not. Does *not* match substrings. 
+# +# Arguments: +# 1: string +# 2: word +function string_has_word() { + local regex + regex="(^| )${2}($| )" + if [[ "${1}" =~ $regex ]];then + return 0 + else + return 3 + fi +} + +# Arguments: +# 1: value - string to check for +# 2: src_array - passed via "${some_array[@]}" +function is_in_array() { + local value src_array + value="$1" + shift + src_array=( "$@" ) + for entry in "${src_array[@]}"; do + [[ "${entry}" == "${value}" ]] && return 0 + done + return 1 +} + +# Remove any entry from given non-associative array_to_check that matches given value_to_rm. The returned array is not +# sparse and the original order is preserved. +# +# Arguments: +# 1: value_to_rm - string to remove from array +# n: array_to_check - passed via "${some_array[@]}" +function rm_array_value() { + __rm_array_value=() + local value_to_rm array_to_check tmp_array entry + value_to_rm="$1" + shift + array_to_check=( "$@" ) + tmp_array=() + for entry in "${array_to_check[@]}"; do + [[ "${entry}" != "${value_to_rm}" ]] && tmp_array+=( "${entry}" ) + done + # shellcheck disable=SC2034 + __rm_array_value=( "${tmp_array[@]}" ) +} + +# Run sed over given $file with given $sed_args array +# +# Arguments: +# 1: full file path as string +# 2: sed_args as array +function replace_in_file() { + local file_path sed_arg + file_path="${1}" + declare -a sed_arg=("${!2}") + sed "${sed_arg[@]}" "${file_path}" > "${file_path}.tmp" || die + mv "${file_path}.tmp" "${file_path}" || die +} + +# Set __get_file_size to size of file in bytes for given file_path. 
+#
+# Arguments:
+# 1: file_path - the path to get the size for
+# 2: format_output - optional, if true converts output to human readable format, default: false
+function get_file_size() {
+    __get_file_size=
+    local file_path du_args file_size
+    file_path="$1"
+    format_output="${2:-false}"
+    if [[ -f "${file_path}" ]]; then
+        du_args=()
+        [[ "${format_output}" == 'true' ]] && du_args+=( '-h' )
+        file_size="$(du "${du_args[@]}" --max-depth=0 "${file_path}" | cut -f1)"
+        __get_file_size="${file_size}"
+    fi
+}
+
+# Arguments:
+# 1: dir_path
+function dir_is_empty() {
+    local dir_path
+    dir_path="$1"
+    [[ -d "${dir_path}" ]] || die "${dir_path} is not a directory"
+    [[ -z "$(ls -A "${dir_path}")" ]]&& return 0
+    return 1
+}
+
+# Returns with exit signal 0 if given dir_path has sub directories, or 3 if not
+#
+# Arguments:
+# 1: dir_path
+function dir_has_subdirs() {
+    local dir_path exit_sig
+    dir_path="$1"
+    ls "${dir_path}"/*/ &> /dev/null
+    exit_sig=$?
+    [[ ${exit_sig} -eq 0 ]] && return 0
+    return 3
+}
+
+# Arguments:
+# 1: dir_path
+function is_git_dir() {
+    local dir_path
+    dir_path="$1"
+    git -C "$1" rev-parse 2> /dev/null
+    return $?
+}
+
+# Arguments:
+# 1: repo_url
+# 2: working_dir
+# 3: dir_name
+function clone_or_update_git_repo() {
+    local repo_url working_dir dir_name cherry_out git_args
+    repo_url="$1"
+    working_dir="$2"
+    dir_name="$3"
+    git_args=()
+    [[ ! -d "${working_dir}" ]] && die "Git working dir ${working_dir} does not exist."
+    is_compact_output_active && git_args+=( '-q' )
+    if is_git_dir "${working_dir}/${dir_name}"; then
+        _status_msg='check git remote for updates'
+        pwrap 'nostderr' git -C "${working_dir}/${dir_name}" fetch "${git_args[@]}" origin || die
+        cherry_out="$(git -C "${working_dir}/${dir_name}" cherry master origin/master)"
+        if [[ -z "${cherry_out}" ]]; then
+            msg_ok "no updates."
+ return 0 + else + # reset to remote as PACKAGES.md files might prevent a normal pull + _status_msg="updates found, reset ${working_dir}/${dir_name} to remote" + pwrap 'nostderr' git -C "${working_dir}/${dir_name}" reset "${git_args[@]}" --hard origin/master || die + msg_ok "updated." + return 3 + fi + else + _status_msg="clone ${repo_url}" + pwrap 'nostderr' git -C "${working_dir}" clone "${git_args[@]}" "${repo_url}" "${dir_name}" || die + msg_ok "cloned." + fi +}