From c6ffbd4fedabaadf386fb472367aa17ea3077d69 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 12:57:44 -0500 Subject: [PATCH 01/10] build-sys: Always build packages as separate stage We were previously trying to support a direct `podman/docker build` *and* injecting externally built packages (for CI). Looking to rework for sealed images it was too hacky; let's just accept that a raw `podman build` no longer works, the canonical entry for local build is `just build` which builds both a package and a container. This way CI and local work exactly the same. Signed-off-by: Colin Walters --- Dockerfile | 13 ++----------- Justfile | 57 +++++++++++++++--------------------------------------- 2 files changed, 18 insertions(+), 52 deletions(-) diff --git a/Dockerfile b/Dockerfile index 499c7199b..48232c761 100644 --- a/Dockerfile +++ b/Dockerfile @@ -83,18 +83,9 @@ ARG rootfs="" RUN --mount=type=bind,from=packaging,target=/run/packaging /run/packaging/configure-rootfs "${variant}" "${rootfs}" COPY --from=packaging /usr-extras/ /usr/ -# Default target for source builds (just build) -# Installs packages from the internal build stage +# Final target: installs pre-built packages from /run/packages volume mount. +# Use with: podman build --target=final -v path/to/packages:/run/packages:ro FROM final-common as final -RUN --mount=type=bind,from=packaging,target=/run/packaging \ - --mount=type=bind,from=build,target=/build-output \ - --network=none \ - /run/packaging/install-rpm-and-setup /build-output/out -RUN bootc container lint --fatal-warnings - -# Alternative target for pre-built packages (CI workflow) -# Use with: podman build --target=final-from-packages -v path/to/packages:/run/packages:ro -FROM final-common as final-from-packages RUN --mount=type=bind,from=packaging,target=/run/packaging \ --network=none \ /run/packaging/install-rpm-and-setup /run/packages diff --git a/Justfile b/Justfile index 16a98be19..4a56ba31c 100644 --- a/Justfile +++ b/Justfile @@ -70,22 +70,25 @@ _git-build-vars: # Note commonly you might want to override the base image via e.g. # `just build --build-arg=base=quay.io/fedora/fedora-bootc:42` # -# The Dockerfile builds RPMs internally in its 'build' stage, so we don't need -# to call 'package' first. This avoids cache invalidation from external files. -build: _keygen - #!/bin/bash - set -xeuo pipefail - eval $(just _git-build-vars) - podman build {{base_buildargs}} --target=final \ - --build-arg=SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} \ - --build-arg=pkgversion=${VERSION} \ - -t {{base_img}}-bin {{buildargs}} . - ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} +# This first builds RPMs via the `package` target, then injects them +# into the container image. +build: package _keygen + @just _build-from-package target/packages # Generate Secure Boot keys (only for our own CI/testing) _keygen: ./hack/generate-secureboot-keys +# Internal helper: build container image from packages at PATH +_build-from-package PATH: + #!/bin/bash + set -xeuo pipefail + # Resolve to absolute path for podman volume mount + # Use :z for SELinux relabeling + pkg_path=$(realpath "{{PATH}}") + podman build --target=final -v "${pkg_path}":/run/packages:ro,z -t {{base_img}}-bin {{buildargs}} . + ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} + # Build a sealed image from current sources. 
build-sealed: @just --justfile {{justfile()}} variant=composefs-sealeduki-sdboot build @@ -108,34 +111,6 @@ package: _packagecontainer chmod a+r target/packages/*.rpm podman rmi localhost/bootc-pkg -# Copy pre-existing packages from PATH into target/packages/ -# Note: This is mainly for CI artifact extraction; build-from-package -# now uses volume mounts directly instead of copying to target/packages/. -copy-packages-from PATH: - #!/bin/bash - set -xeuo pipefail - if ! compgen -G "{{PATH}}/*.rpm" > /dev/null; then - echo "Error: No packages found in {{PATH}}" >&2 - exit 1 - fi - mkdir -p target/packages - rm -vf target/packages/*.rpm - cp -v {{PATH}}/*.rpm target/packages/ - chmod a+rx target target/packages - chmod a+r target/packages/*.rpm - -# Build the container image using pre-existing packages from PATH -# Uses the 'final-from-packages' target with a volume mount to inject packages, -# avoiding Docker context cache invalidation issues. -build-from-package PATH: _keygen - #!/bin/bash - set -xeuo pipefail - # Resolve to absolute path for podman volume mount - # Use :z for SELinux relabeling - pkg_path=$(realpath "{{PATH}}") - podman build {{base_buildargs}} --target=final-from-packages -v "${pkg_path}":/run/packages:ro,z -t {{base_img}}-bin {{buildargs}} . - ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} - # Pull images used by hack/lbi _pull-lbi-images: podman pull -q --retry 5 --retry-delay 5s {{lbi_images}} @@ -146,8 +121,8 @@ build-integration-test-image: build _pull-lbi-images ./hack/build-sealed {{variant}} {{integration_img}}-bin {{integration_img}} {{sealed_buildargs}} # Build integration test image using pre-existing packages from PATH -build-integration-test-image-from-package PATH: _pull-lbi-images - @just build-from-package {{PATH}} +build-integration-test-image-from-package PATH: _keygen _pull-lbi-images + @just _build-from-package {{PATH}} cd hack && podman build {{base_buildargs}} -t {{integration_img}}-bin -f Containerfile . ./hack/build-sealed {{variant}} {{integration_img}}-bin {{integration_img}} {{sealed_buildargs}} From 410243856107bd9db94cfc453a3b75053595d594 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 13:20:29 -0500 Subject: [PATCH 02/10] build-sys: Move `build` back to being the default target Oops. Signed-off-by: Colin Walters --- Justfile | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Justfile b/Justfile index 4a56ba31c..8b29ed594 100644 --- a/Justfile +++ b/Justfile @@ -47,6 +47,15 @@ buildargs := base_buildargs + " --secret=id=secureboot_key,src=target/test-secur # Args for build-sealed (no base arg, it sets that itself) sealed_buildargs := "--build-arg=variant=" + variant + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" +# The default target: build the container image from current sources. +# Note commonly you might want to override the base image via e.g. +# `just build --build-arg=base=quay.io/fedora/fedora-bootc:42` +# +# This first builds RPMs via the `package` target, then injects them +# into the container image. +build: package _keygen + @just _build-from-package target/packages + # Compute SOURCE_DATE_EPOCH and VERSION from git for reproducible builds. # Outputs shell variable assignments that can be eval'd. 
_git-build-vars: @@ -66,15 +75,6 @@ _git-build-vars: echo "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH}" echo "VERSION=${VERSION}" -# The default target: build the container image from current sources. -# Note commonly you might want to override the base image via e.g. -# `just build --build-arg=base=quay.io/fedora/fedora-bootc:42` -# -# This first builds RPMs via the `package` target, then injects them -# into the container image. -build: package _keygen - @just _build-from-package target/packages - # Generate Secure Boot keys (only for our own CI/testing) _keygen: ./hack/generate-secureboot-keys From 5471e6821ca2739180cc439128214030f7418e82 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Dec 2025 14:11:49 -0500 Subject: [PATCH 03/10] ostree: Handle images without final diffid Signed-off-by: Colin Walters --- crates/ostree-ext/src/container/store.rs | 53 ++++++++++++++---------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/crates/ostree-ext/src/container/store.rs b/crates/ostree-ext/src/container/store.rs index 3e9991faa..2a480b2e7 100644 --- a/crates/ostree-ext/src/container/store.rs +++ b/crates/ostree-ext/src/container/store.rs @@ -1508,13 +1508,7 @@ pub(crate) fn export_to_oci( let srcinfo = query_image(repo, imgref)?.ok_or_else(|| anyhow!("No such image"))?; let (commit_layer, component_layers, remaining_layers) = parse_manifest_layout(&srcinfo.manifest, &srcinfo.configuration)?; - let commit_layer = commit_layer.ok_or_else(|| anyhow!("Missing {DIFFID_LABEL}"))?; - let commit_chunk_ref = ref_for_layer(commit_layer)?; - let commit_chunk_rev = repo.require_rev(&commit_chunk_ref)?; - let mut chunking = chunking::Chunking::new(repo, &commit_chunk_rev)?; - for layer in component_layers { - chunking_from_layer_committed(repo, layer, &mut chunking)?; - } + // Unfortunately today we can't guarantee we reserialize the same tar stream // or compression, so we'll need to generate a new copy of the manifest and config // with the layers reset. @@ -1526,8 +1520,6 @@ pub(crate) fn export_to_oci( } new_config.rootfs_mut().diff_ids_mut().clear(); - let mut dest_oci = ocidir::OciDir::ensure(dest_oci.try_clone()?)?; - let opts = ExportOpts { skip_compression: opts.skip_compression, authfile: opts.authfile, @@ -1536,19 +1528,36 @@ pub(crate) fn export_to_oci( let mut labels = HashMap::new(); - // Given the object chunking information we recomputed from what - // we found on disk, re-serialize to layers (tarballs). - export_chunked( - repo, - &srcinfo.base_commit, - &mut dest_oci, - &mut new_manifest, - &mut new_config, - &mut labels, - chunking, - &opts, - "", - )?; + let mut dest_oci = ocidir::OciDir::ensure(dest_oci.try_clone()?)?; + + let commit_chunk_ref = commit_layer + .as_ref() + .map(|l| ref_for_layer(l)) + .transpose()?; + let commit_chunk_rev = commit_chunk_ref + .as_ref() + .map(|r| repo.require_rev(&r)) + .transpose()?; + if let Some(commit_chunk_rev) = commit_chunk_rev { + let mut chunking = chunking::Chunking::new(repo, &commit_chunk_rev)?; + for layer in component_layers { + chunking_from_layer_committed(repo, layer, &mut chunking)?; + } + + // Given the object chunking information we recomputed from what + // we found on disk, re-serialize to layers (tarballs). 
+ export_chunked( + repo, + &srcinfo.base_commit, + &mut dest_oci, + &mut new_manifest, + &mut new_config, + &mut labels, + chunking, + &opts, + "", + )?; + } // Now, handle the non-ostree layers; this is a simple conversion of // From 206cd9e7762e2636dcf0d73b8d1686d1707e2956 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 13:19:24 -0500 Subject: [PATCH 04/10] build-sys: Always build a "from scratch" image This changes things so we always run through https://docs.fedoraproject.org/en-US/bootc/building-from-scratch/ in our default builds, which helps work around https://github.com/containers/composefs-rs/issues/132 But it will also help clean up our image building in general a bit. Signed-off-by: Colin Walters --- Dockerfile | 31 ++++++++++++++++++++++++++----- Justfile | 6 +++++- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index 48232c761..410b06e59 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,14 +15,10 @@ COPY . /src FROM scratch as packaging COPY contrib/packaging / -FROM $base as base -# Mark this as a test image (moved from --label build flag to fix layer caching) -LABEL bootc.testimage="1" - # This image installs build deps, pulls in our source code, and installs updated # bootc binaries in /out. The intention is that the target rootfs is extracted from /out # back into a final stage (without the build deps etc) below. -FROM base as buildroot +FROM $base as buildroot # Flip this off to disable initramfs code ARG initramfs=1 # This installs our buildroot, and we want to cache it independently of the rest. @@ -40,6 +36,31 @@ FROM buildroot as sdboot-content # Writes to /out RUN /src/contrib/packaging/configure-systemdboot download +# We always do a "from scratch" build +# https://docs.fedoraproject.org/en-US/bootc/building-from-scratch/ +# because this fixes https://github.com/containers/composefs-rs/issues/132 +# NOTE: Until we have https://gitlab.com/fedora/bootc/base-images/-/merge_requests/317 +# this stage will end up capturing whatever RPMs we find at this time. +# NOTE: This is using the *stock* bootc binary, not the one we want to build from +# local sources. We'll override it later. +# NOTE: All your base belong to me. +FROM $base as target-base +RUN /usr/libexec/bootc-base-imagectl build-rootfs --manifest=standard /target-rootfs + +FROM scratch as base +COPY --from=target-base /target-rootfs/ / +# Note we don't do any customization here yet +# Mark this as a test image +LABEL bootc.testimage="1" +# Otherwise standard metadata +LABEL containers.bootc 1 +LABEL ostree.bootable 1 +# https://pagure.io/fedora-kiwi-descriptions/pull-request/52 +ENV container=oci +# Optional labels that only apply when running this image as a container. These keep the default entry point running under systemd. +STOPSIGNAL SIGRTMIN+3 +CMD ["/sbin/init"] + # NOTE: Every RUN instruction past this point should use `--network=none`; we want to ensure # all external dependencies are clearly delineated. 
diff --git a/Justfile b/Justfile index 8b29ed594..4b1455f7b 100644 --- a/Justfile +++ b/Justfile @@ -43,7 +43,11 @@ lbi_images := "quay.io/curl/curl:latest quay.io/curl/curl-base:latest registry.a generic_buildargs := "" # Args for package building (no secrets needed, just builds RPMs) base_buildargs := generic_buildargs + " --build-arg=base=" + base + " --build-arg=variant=" + variant -buildargs := base_buildargs + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" +# - scratch builds need extra perms per https://docs.fedoraproject.org/en-US/bootc/building-from-scratch/ +# - we do secure boot signing here, so provide the keys +buildargs := base_buildargs \ + + " --cap-add=all --security-opt=label=type:container_runtime_t --device /dev/fuse" \ + + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" # Args for build-sealed (no base arg, it sets that itself) sealed_buildargs := "--build-arg=variant=" + variant + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" From e552acb477ce70ef8d182ba41408f5a6d3c5e6f2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 15:08:07 -0500 Subject: [PATCH 05/10] build-sys: Consolidate test image content into base Move all content from the derived test image (hack/Containerfile) into the main Dockerfile base image. This includes nushell, cloud-init, and the other testing packages from packages.txt. This simplifies the build by avoiding the need to juggle multiple images during testing workflows - the base image now contains everything needed. Assisted-by: OpenCode (Claude Sonnet 4) Signed-off-by: Colin Walters --- .dockerignore | 2 ++ Dockerfile | 2 ++ hack/Containerfile | 22 +--------------------- hack/Containerfile.packit | 13 +------------ hack/provision-derived.sh | 24 +++++++++++++----------- 5 files changed, 19 insertions(+), 44 deletions(-) diff --git a/.dockerignore b/.dockerignore index 1f5579978..5f13236dd 100644 --- a/.dockerignore +++ b/.dockerignore @@ -15,6 +15,8 @@ !docs/ # We use the spec file !contrib/ +# This is used to add content on top of our default base +!hack/ # The systemd units and baseimage bits end up in installs !systemd/ !baseimage/ diff --git a/Dockerfile b/Dockerfile index 410b06e59..ef12cedec 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,6 +49,8 @@ RUN /usr/libexec/bootc-base-imagectl build-rootfs --manifest=standard /target-ro FROM scratch as base COPY --from=target-base /target-rootfs/ / +COPY --from=src /src/hack/ /run/hack/ +RUN cd /run/hack/ && ./provision-derived.sh # Note we don't do any customization here yet # Mark this as a test image LABEL bootc.testimage="1" diff --git a/hack/Containerfile b/hack/Containerfile index ea24df36f..13dc120b9 100644 --- a/hack/Containerfile +++ b/hack/Containerfile @@ -7,30 +7,10 @@ FROM scratch as context # We only need this stuff in the initial context COPY . / -# An intermediate layer which caches the extended RPMS -FROM localhost/bootc as extended -# And this layer has additional stuff for testing, such as nushell etc. 
-RUN --mount=type=bind,from=context,target=/run/context <&2; exit 1 ;; -esac - # Ensure this is pre-created mkdir -p -m 0700 /var/roothome mkdir -p ~/.config/nushell @@ -51,9 +44,8 @@ grep -Ev -e '^#' packages.txt | xargs dnf -y install cat <> /usr/lib/bootc/kargs.d/20-console.toml kargs = ["console=ttyS0,115200n8"] KARGEOF -if test $cloudinit = 1; then - dnf -y install cloud-init - ln -s ../cloud-init.target /usr/lib/systemd/system/default.target.wants +dnf -y install cloud-init +ln -s ../cloud-init.target /usr/lib/systemd/system/default.target.wants # Allow root SSH login for testing with bcvk/tmt mkdir -p /etc/cloud/cloud.cfg.d cat > /etc/cloud/cloud.cfg.d/80-enable-root.cfg <<'CLOUDEOF' @@ -67,7 +59,6 @@ growpart: devices: ["/sysroot"] resize_rootfs: false CLOUDEOF -fi dnf clean all # Stock extra cleaning of logs and caches in general (mostly dnf) @@ -122,3 +113,14 @@ d /var/lib/dhclient 0755 root root - - EOF rm -rf /var/lib/dhclient fi + +# For test-22-logically-bound-install +cp -a lbi/usr/. /usr +for x in curl.container curl-base.image podman.image; do + ln -s /usr/share/containers/systemd/$x /usr/lib/bootc/bound-images.d/$x +done + +# Add some testing kargs into our dev builds +install -D -t /usr/lib/bootc/kargs.d test-kargs/* +# Also copy in some default install configs we use for testing +install -D -t /usr/lib/bootc/install/ install-test-configs/* From 1c50235613ad3a8d6bffe4a3c33ad444238d187c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 15:38:30 -0500 Subject: [PATCH 06/10] build-sys: Remove separate integration test image The previous commit consolidated test content (nushell, cloud-init, etc.) into the base image. This completes that work by removing the separate `build-integration-test-image` target and updating all references. Now `just build` produces the complete test-ready image directly, simplifying the build pipeline and eliminating the intermediate `localhost/bootc-integration` image. Signed-off-by: Colin Walters --- .github/workflows/build-and-publish.yml | 4 +- .github/workflows/ci.yml | 10 ++--- CONTRIBUTING.md | 2 +- Justfile | 50 +++++++++++-------------- crates/xtask/src/xtask.rs | 6 +-- hack/Containerfile | 16 -------- hack/Containerfile.drop-lbis | 2 +- hack/Containerfile.packit | 2 +- hack/provision-derived.sh | 17 +++++++-- hack/provision-packit.sh | 2 +- hack/system-reinstall-bootc.exp | 2 +- tmt/tests/Dockerfile.upgrade | 2 +- 12 files changed, 52 insertions(+), 63 deletions(-) delete mode 100644 hack/Containerfile diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml index 817778f9b..f4caa7a13 100644 --- a/.github/workflows/build-and-publish.yml +++ b/.github/workflows/build-and-publish.yml @@ -42,7 +42,9 @@ jobs: fi - name: Build container - run: just build-integration-test-image + # TODO: Also consider building + publishing an image that is just "base + bootc" + # as this implicitly changed to also publish our integration test images. 
+ run: just build - name: Login to ghcr.io uses: redhat-actions/podman-login@v1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d4bb812a8..46712ae98 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -85,9 +85,9 @@ jobs: --tmpfs /var/lib/containers \ -v /run/dbus:/run/dbus -v /run/systemd:/run/systemd localhost/bootc /src/crates/ostree-ext/ci/priv-integration.sh # Nondestructive but privileged tests - sudo bootc-integration-tests host-privileged localhost/bootc-integration-install + sudo bootc-integration-tests host-privileged localhost/bootc-install # Install tests - sudo bootc-integration-tests install-alongside localhost/bootc-integration-install + sudo bootc-integration-tests install-alongside localhost/bootc-install # system-reinstall-bootc tests cargo build --release -p system-reinstall-bootc @@ -97,7 +97,7 @@ jobs: sudo install -m 0755 target/release/system-reinstall-bootc /usr/bin/system-reinstall-bootc # These tests may mutate the system live so we can't run in parallel - sudo bootc-integration-tests system-reinstall localhost/bootc-integration --test-threads=1 + sudo bootc-integration-tests system-reinstall localhost/bootc --test-threads=1 # And the fsverity case sudo podman run --privileged --pid=host localhost/bootc-fsverity bootc install to-existing-root --stateroot=other \ @@ -189,9 +189,9 @@ jobs: - name: Build container run: | - just build-integration-test-image-from-package target/packages + just build-from-packages target/packages # Extra cross-check (duplicating the integration test) that we're using the right base - used_vid=$(podman run --rm localhost/bootc-integration bash -c '. /usr/lib/os-release && echo ${ID}-${VERSION_ID}') + used_vid=$(podman run --rm localhost/bootc bash -c '. /usr/lib/os-release && echo ${ID}-${VERSION_ID}') test ${{ matrix.test_os }} = "${used_vid}" - name: Unit and container integration tests diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 151cd0efb..b395bd58b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -129,7 +129,7 @@ There's a small set of tests which are designed to run inside a bootc container and are built into the default container image: ``` -$ podman run --rm -ti localhost/bootc bootc-integration-tests container +$ just test-container ``` ## Submitting a patch diff --git a/Justfile b/Justfile index 4b1455f7b..37bfab62c 100644 --- a/Justfile +++ b/Justfile @@ -17,10 +17,8 @@ # This image is just the base image plus our updated bootc binary base_img := "localhost/bootc" -# Derives from the above and adds nushell, cloudinit etc. -integration_img := base_img + "-integration" # Has a synthetic upgrade -integration_upgrade_img := integration_img + "-upgrade" +upgrade_img := base_img + "-upgrade" # ostree: The default # composefs-sealeduki-sdboot: A system with a sealed composefs using systemd-boot @@ -57,9 +55,19 @@ sealed_buildargs := "--build-arg=variant=" + variant + " --secret=id=secureboot_ # # This first builds RPMs via the `package` target, then injects them # into the container image. -build: package _keygen +build: package _keygen && _pull-lbi-images @just _build-from-package target/packages +# Build container image using pre-existing packages from PATH. +# This skips the package build step - useful when packages are provided +# externally (e.g. downloaded from CI artifacts). 
+build-from-packages PATH: _keygen && _pull-lbi-images + @just _build-from-package {{PATH}} + +# Pull images used by hack/lbi +_pull-lbi-images: + podman pull -q --retry 5 --retry-delay 5s {{lbi_images}} + # Compute SOURCE_DATE_EPOCH and VERSION from git for reproducible builds. # Outputs shell variable assignments that can be eval'd. _git-build-vars: @@ -115,28 +123,13 @@ package: _packagecontainer chmod a+r target/packages/*.rpm podman rmi localhost/bootc-pkg -# Pull images used by hack/lbi -_pull-lbi-images: - podman pull -q --retry 5 --retry-delay 5s {{lbi_images}} - -# This container image has additional testing content and utilities -build-integration-test-image: build _pull-lbi-images - cd hack && podman build {{base_buildargs}} -t {{integration_img}}-bin -f Containerfile . - ./hack/build-sealed {{variant}} {{integration_img}}-bin {{integration_img}} {{sealed_buildargs}} - -# Build integration test image using pre-existing packages from PATH -build-integration-test-image-from-package PATH: _keygen _pull-lbi-images - @just _build-from-package {{PATH}} - cd hack && podman build {{base_buildargs}} -t {{integration_img}}-bin -f Containerfile . - ./hack/build-sealed {{variant}} {{integration_img}}-bin {{integration_img}} {{sealed_buildargs}} - # Build+test using the `composefs-sealeduki-sdboot` variant. test-composefs: just variant=composefs-sealeduki-sdboot test-tmt readonly local-upgrade-reboot # Only used by ci.yml right now -build-install-test-image: build-integration-test-image - cd hack && podman build {{base_buildargs}} -t {{integration_img}}-install -f Containerfile.drop-lbis +build-install-test-image: build + cd hack && podman build {{base_buildargs}} -t {{base_img}}-install -f Containerfile.drop-lbis # These tests accept the container image as input, and may spawn it. run-container-external-tests: @@ -158,28 +151,29 @@ validate: # # To run an individual test, pass it as an argument like: # `just test-tmt readonly` -test-tmt *ARGS: build-integration-test-image _build-upgrade-image +test-tmt *ARGS: build + @just _build-upgrade-image @just test-tmt-nobuild {{ARGS}} # Generate a local synthetic upgrade _build-upgrade-image: - cat tmt/tests/Dockerfile.upgrade | podman build -t {{integration_upgrade_img}}-bin --from={{integration_img}}-bin - - ./hack/build-sealed {{variant}} {{integration_upgrade_img}}-bin {{integration_upgrade_img}} {{sealed_buildargs}} + cat tmt/tests/Dockerfile.upgrade | podman build -t {{upgrade_img}}-bin --from={{base_img}}-bin - + ./hack/build-sealed {{variant}} {{upgrade_img}}-bin {{upgrade_img}} {{sealed_buildargs}} -# Assume the localhost/bootc-integration image is up to date, and just run tests. +# Assume the localhost/bootc image is up to date, and just run tests. # Useful for iterating on tests quickly. 
test-tmt-nobuild *ARGS: - cargo xtask run-tmt --env=BOOTC_variant={{variant}} --upgrade-image={{integration_upgrade_img}} {{integration_img}} {{ARGS}} + cargo xtask run-tmt --env=BOOTC_variant={{variant}} --upgrade-image={{upgrade_img}} {{base_img}} {{ARGS}} # Cleanup all test VMs created by tmt tests tmt-vm-cleanup: bcvk libvirt rm --stop --force --label bootc.test=1 # Run tests (unit and integration) that are containerized -test-container: build-units build-integration-test-image +test-container: build build-units podman run --rm --read-only localhost/bootc-units /usr/bin/bootc-units # Pass these through for cross-checking - podman run --rm --env=BOOTC_variant={{variant}} --env=BOOTC_base={{base}} {{integration_img}} bootc-integration-tests container + podman run --rm --env=BOOTC_variant={{variant}} --env=BOOTC_base={{base}} {{base_img}} bootc-integration-tests container # Remove all container images built (locally) via this Justfile, by matching a label clean-local-images: diff --git a/crates/xtask/src/xtask.rs b/crates/xtask/src/xtask.rs index 203a2f3de..97b267c64 100644 --- a/crates/xtask/src/xtask.rs +++ b/crates/xtask/src/xtask.rs @@ -58,7 +58,7 @@ enum Commands { /// Arguments for run-tmt command #[derive(Debug, Args)] pub(crate) struct RunTmtArgs { - /// Image name (e.g., "localhost/bootc-integration") + /// Image name (e.g., "localhost/bootc") pub(crate) image: String, /// Test plan filters (e.g., "readonly") @@ -73,7 +73,7 @@ pub(crate) struct RunTmtArgs { #[clap(long)] pub(crate) env: Vec, - /// Upgrade image to use when bind-storage-ro is available (e.g., localhost/bootc-integration-upgrade) + /// Upgrade image to use when bind-storage-ro is available (e.g., localhost/bootc-upgrade) #[clap(long)] pub(crate) upgrade_image: Option, @@ -85,7 +85,7 @@ pub(crate) struct RunTmtArgs { /// Arguments for tmt-provision command #[derive(Debug, Args)] pub(crate) struct TmtProvisionArgs { - /// Image name (e.g., "localhost/bootc-integration") + /// Image name (e.g., "localhost/bootc") pub(crate) image: String, /// VM name (defaults to "bootc-tmt-manual-") diff --git a/hack/Containerfile b/hack/Containerfile deleted file mode 100644 index 13dc120b9..000000000 --- a/hack/Containerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Build a container image that has extra testing stuff in it, such -# as nushell, some preset logically bound images, etc. This expects -# to create an image derived FROM localhost/bootc which was created -# by the Dockerfile at top. - -FROM scratch as context -# We only need this stuff in the initial context -COPY . 
/ - -# And the configs -FROM localhost/bootc -RUN --mount=type=bind,from=context,target=/run/context <&2; exit 1 ;; +esac + # Ensure this is pre-created mkdir -p -m 0700 /var/roothome mkdir -p ~/.config/nushell @@ -44,11 +51,12 @@ grep -Ev -e '^#' packages.txt | xargs dnf -y install cat <> /usr/lib/bootc/kargs.d/20-console.toml kargs = ["console=ttyS0,115200n8"] KARGEOF -dnf -y install cloud-init -ln -s ../cloud-init.target /usr/lib/systemd/system/default.target.wants +if test $cloudinit = 1; then + dnf -y install cloud-init + ln -s ../cloud-init.target /usr/lib/systemd/system/default.target.wants # Allow root SSH login for testing with bcvk/tmt -mkdir -p /etc/cloud/cloud.cfg.d -cat > /etc/cloud/cloud.cfg.d/80-enable-root.cfg <<'CLOUDEOF' + mkdir -p /etc/cloud/cloud.cfg.d + cat > /etc/cloud/cloud.cfg.d/80-enable-root.cfg <<'CLOUDEOF' # Enable root login for testing disable_root: false @@ -59,6 +67,7 @@ growpart: devices: ["/sysroot"] resize_rootfs: false CLOUDEOF +fi dnf clean all # Stock extra cleaning of logs and caches in general (mostly dnf) diff --git a/hack/provision-packit.sh b/hack/provision-packit.sh index 7f1848bf8..9ec9a144a 100755 --- a/hack/provision-packit.sh +++ b/hack/provision-packit.sh @@ -86,7 +86,7 @@ cp /etc/yum.repos.d/test-artifacts.repo "$BOOTC_TEMPDIR" ls -al "$BOOTC_TEMPDIR" # Do not use just because it's only available on Fedora, not on CS and RHEL -podman build --jobs=4 --from "$BASE" -v "$BOOTC_TEMPDIR":/bootc-test:z -t localhost/bootc-integration -f "${BOOTC_TEMPDIR}/Containerfile.packit" "$BOOTC_TEMPDIR" +podman build --jobs=4 --from "$BASE" -v "$BOOTC_TEMPDIR":/bootc-test:z -t localhost/bootc -f "${BOOTC_TEMPDIR}/Containerfile.packit" "$BOOTC_TEMPDIR" # Keep these in sync with what's used in hack/lbi podman pull -q --retry 5 --retry-delay 5s quay.io/curl/curl:latest quay.io/curl/curl-base:latest registry.access.redhat.com/ubi9/podman:latest diff --git a/hack/system-reinstall-bootc.exp b/hack/system-reinstall-bootc.exp index 760033095..54effbd74 100755 --- a/hack/system-reinstall-bootc.exp +++ b/hack/system-reinstall-bootc.exp @@ -3,7 +3,7 @@ # Set a timeout set timeout 600 -spawn system-reinstall-bootc localhost/bootc-integration +spawn system-reinstall-bootc localhost/bootc expect { "Then you can login as * using those keys. \\\[Y/n\\\]" { diff --git a/tmt/tests/Dockerfile.upgrade b/tmt/tests/Dockerfile.upgrade index ab3b73c7c..a9e36ba50 100644 --- a/tmt/tests/Dockerfile.upgrade +++ b/tmt/tests/Dockerfile.upgrade @@ -1,3 +1,3 @@ # Just creates a file as a new layer for a synthetic upgrade test -FROM localhost/bootc-integration +FROM localhost/bootc RUN touch --reference=/usr/bin/bash /usr/share/testing-bootc-upgrade-apply From 6c51c9787ea753053e1c73fda2f460d6707180eb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 15:45:39 -0500 Subject: [PATCH 07/10] build-sys: Keep bootc-pkg image for layer caching Removing localhost/bootc-pkg at the end of the package target also deletes the build stage layers, causing subsequent builds to miss the cache and rebuild the RPMs from scratch. Keep the image around; use `just clean-local-images` to reclaim space. Signed-off-by: Colin Walters --- Justfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Justfile b/Justfile index 37bfab62c..ff520a93b 100644 --- a/Justfile +++ b/Justfile @@ -121,7 +121,7 @@ package: _packagecontainer podman run --rm localhost/bootc-pkg tar -C /out/ -cf - . 
| tar -C target/packages/ -xvf - chmod a+rx target target/packages chmod a+r target/packages/*.rpm - podman rmi localhost/bootc-pkg + # Keep localhost/bootc-pkg for layer caching; use `just clean-local-images` to reclaim space # Build+test using the `composefs-sealeduki-sdboot` variant. test-composefs: From 00063f0430352c863961e40ccad8d3b447a57ac0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Dec 2025 16:36:30 -0500 Subject: [PATCH 08/10] build-sys: Consistently use `RUN --network=none` and add check Ensure all RUN instructions after the "external dependency cutoff point" marker include `--network=none` right after `RUN`. This enforces that external dependencies are clearly delineated in the early stages of the Dockerfile. The check is part of `cargo xtask check-buildsys` and includes unit tests. Assisted-by: OpenCode (Sonnet 4) Signed-off-by: Colin Walters --- Dockerfile | 17 ++-- crates/xtask/src/buildsys.rs | 165 +++++++++++++++++++++++++++++++++++ crates/xtask/src/xtask.rs | 48 +--------- 3 files changed, 177 insertions(+), 53 deletions(-) create mode 100644 crates/xtask/src/buildsys.rs diff --git a/Dockerfile b/Dockerfile index ef12cedec..567432161 100644 --- a/Dockerfile +++ b/Dockerfile @@ -63,8 +63,12 @@ ENV container=oci STOPSIGNAL SIGRTMIN+3 CMD ["/sbin/init"] +# ------------- +# external dependency cutoff point: # NOTE: Every RUN instruction past this point should use `--network=none`; we want to ensure # all external dependencies are clearly delineated. +# This is verified in `cargo xtask check-buildsys`. +# ------------- FROM buildroot as build # Version for RPM build (optional, computed from git in Justfile) @@ -73,7 +77,7 @@ ARG pkgversion ARG SOURCE_DATE_EPOCH ENV SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} # Build RPM directly from source, using cached target directory -RUN --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome --network=none RPM_VERSION="${pkgversion}" /src/contrib/packaging/build-rpm +RUN --network=none --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome RPM_VERSION="${pkgversion}" /src/contrib/packaging/build-rpm FROM buildroot as sdboot-signed # The secureboot key and cert are passed via Justfile @@ -89,11 +93,11 @@ FROM build as units # A place that we're more likely to be able to set xattrs VOLUME /var/tmp ENV TMPDIR=/var/tmp -RUN --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome --network=none make install-unit-tests +RUN --network=none --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome make install-unit-tests # This just does syntax checking FROM buildroot as validate -RUN --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome --network=none make validate +RUN --network=none --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome make validate # Common base for final images: configures variant, rootfs, and injects extra content FROM base as final-common @@ -103,13 +107,12 @@ RUN --network=none --mount=type=bind,from=packaging,target=/run/packaging \ --mount=type=bind,from=sdboot-signed,target=/run/sdboot-signed \ /run/packaging/configure-variant "${variant}" ARG rootfs="" -RUN --mount=type=bind,from=packaging,target=/run/packaging /run/packaging/configure-rootfs "${variant}" "${rootfs}" +RUN --network=none --mount=type=bind,from=packaging,target=/run/packaging /run/packaging/configure-rootfs "${variant}" "${rootfs}" COPY --from=packaging /usr-extras/ /usr/ # Final target: installs pre-built 
packages from /run/packages volume mount. # Use with: podman build --target=final -v path/to/packages:/run/packages:ro FROM final-common as final -RUN --mount=type=bind,from=packaging,target=/run/packaging \ - --network=none \ +RUN --network=none --mount=type=bind,from=packaging,target=/run/packaging \ /run/packaging/install-rpm-and-setup /run/packages -RUN bootc container lint --fatal-warnings +RUN --network=none bootc container lint --fatal-warnings diff --git a/crates/xtask/src/buildsys.rs b/crates/xtask/src/buildsys.rs new file mode 100644 index 000000000..9f26a288a --- /dev/null +++ b/crates/xtask/src/buildsys.rs @@ -0,0 +1,165 @@ +//! Build system validation checks. + +use std::collections::BTreeMap; + +use anyhow::{Context, Result}; +use camino::{Utf8Path, Utf8PathBuf}; +use fn_error_context::context; +use xshell::{cmd, Shell}; + +const DOCKERFILE_NETWORK_CUTOFF: &str = "external dependency cutoff point"; + +/// Check build system properties +/// +/// - Reproducible builds for the RPM +/// - Dockerfile network isolation after cutoff point +#[context("Checking build system")] +pub fn check_buildsys(sh: &Shell, dockerfile_path: &Utf8Path) -> Result<()> { + check_package_reproducibility(sh)?; + check_dockerfile_network_isolation(dockerfile_path)?; + Ok(()) +} + +/// Verify that consecutive `just package` invocations produce identical RPM checksums. +#[context("Checking package reproducibility")] +fn check_package_reproducibility(sh: &Shell) -> Result<()> { + println!("Checking reproducible builds..."); + // Helper to compute SHA256 of bootc RPMs in target/packages/ + fn get_rpm_checksums(sh: &Shell) -> Result> { + // Find bootc*.rpm files in target/packages/ + let packages_dir = Utf8Path::new("target/packages"); + let mut rpm_files: Vec = Vec::new(); + for entry in std::fs::read_dir(packages_dir).context("Reading target/packages")? { + let entry = entry?; + let path = Utf8PathBuf::try_from(entry.path())?; + if path.extension() == Some("rpm") { + rpm_files.push(path); + } + } + + assert!(!rpm_files.is_empty()); + + let mut checksums = BTreeMap::new(); + for rpm_path in &rpm_files { + let output = cmd!(sh, "sha256sum {rpm_path}").read()?; + let (hash, filename) = output + .split_once(" ") + .with_context(|| format!("failed to parse sha256sum output: '{}'", output))?; + checksums.insert(filename.to_owned(), hash.to_owned()); + } + Ok(checksums) + } + + cmd!(sh, "just package").run()?; + let first_checksums = get_rpm_checksums(sh)?; + cmd!(sh, "just package").run()?; + let second_checksums = get_rpm_checksums(sh)?; + + itertools::assert_equal(first_checksums, second_checksums); + println!("ok package reproducibility"); + + Ok(()) +} + +/// Verify that all RUN instructions in the Dockerfile after the network cutoff +/// point include `--network=none`. +#[context("Checking Dockerfile network isolation")] +fn check_dockerfile_network_isolation(dockerfile_path: &Utf8Path) -> Result<()> { + println!("Checking Dockerfile network isolation..."); + let dockerfile = std::fs::read_to_string(dockerfile_path).context("Reading Dockerfile")?; + verify_dockerfile_network_isolation(&dockerfile)?; + println!("ok Dockerfile network isolation"); + Ok(()) +} + +const RUN_NETWORK_NONE: &str = "RUN --network=none"; + +/// Verify that all RUN instructions after the network cutoff marker start with +/// `RUN --network=none`. +/// +/// Returns Ok(()) if all RUN instructions comply, or an error listing violations. 
+pub fn verify_dockerfile_network_isolation(dockerfile: &str) -> Result<()> { + // Find the cutoff point + let cutoff_line = dockerfile + .lines() + .position(|line| line.contains(DOCKERFILE_NETWORK_CUTOFF)) + .ok_or_else(|| { + anyhow::anyhow!( + "Dockerfile missing '{}' marker comment", + DOCKERFILE_NETWORK_CUTOFF + ) + })?; + + // Check all RUN instructions after the cutoff point + let mut errors = Vec::new(); + + for (idx, line) in dockerfile.lines().enumerate().skip(cutoff_line + 1) { + let line_num = idx + 1; // 1-based line numbers + let trimmed = line.trim(); + + // Check if this is a RUN instruction + if trimmed.starts_with("RUN ") { + // Must start with exactly "RUN --network=none" + if !trimmed.starts_with(RUN_NETWORK_NONE) { + errors.push(format!( + " line {}: RUN instruction must start with `{}`", + line_num, RUN_NETWORK_NONE + )); + } + } + } + + if !errors.is_empty() { + anyhow::bail!( + "Dockerfile has RUN instructions after '{}' that don't start with `{}`:\n{}", + DOCKERFILE_NETWORK_CUTOFF, + RUN_NETWORK_NONE, + errors.join("\n") + ); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_network_isolation_valid() { + let dockerfile = r#" +FROM base +RUN echo "before cutoff, no network restriction needed" +# external dependency cutoff point +RUN --network=none echo "good" +RUN --network=none --mount=type=bind,from=foo,target=/bar some-command +"#; + verify_dockerfile_network_isolation(dockerfile).unwrap(); + } + + #[test] + fn test_network_isolation_missing_flag() { + let dockerfile = r#" +FROM base +# external dependency cutoff point +RUN --network=none echo "good" +RUN echo "bad - missing network flag" +"#; + let err = verify_dockerfile_network_isolation(dockerfile).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("line 5"), "error should mention line 5: {msg}"); + } + + #[test] + fn test_network_isolation_wrong_position() { + // --network=none must come immediately after RUN + let dockerfile = r#" +FROM base +# external dependency cutoff point +RUN --mount=type=bind,from=foo,target=/bar --network=none echo "bad" +"#; + let err = verify_dockerfile_network_isolation(dockerfile).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("line 4"), "error should mention line 4: {msg}"); + } +} diff --git a/crates/xtask/src/xtask.rs b/crates/xtask/src/xtask.rs index 97b267c64..6921a681f 100644 --- a/crates/xtask/src/xtask.rs +++ b/crates/xtask/src/xtask.rs @@ -14,6 +14,7 @@ use clap::{Args, Parser, Subcommand}; use fn_error_context::context; use xshell::{cmd, Shell}; +mod buildsys; mod man; mod tmt; @@ -137,7 +138,7 @@ fn try_main() -> Result<()> { Commands::Spec => spec(&sh), Commands::RunTmt(args) => tmt::run_tmt(&sh, &args), Commands::TmtProvision(args) => tmt::tmt_provision(&sh, &args), - Commands::CheckBuildsys => check_buildsys(&sh), + Commands::CheckBuildsys => buildsys::check_buildsys(&sh, "Dockerfile".into()), } } @@ -405,48 +406,3 @@ fn update_generated(sh: &Shell) -> Result<()> { Ok(()) } - -/// Check build system properties -/// -/// - Reproducible builds for the RPM -#[context("Checking build system")] -fn check_buildsys(sh: &Shell) -> Result<()> { - use std::collections::BTreeMap; - - println!("Checking reproducible builds..."); - // Helper to compute SHA256 of bootc RPMs in target/packages/ - fn get_rpm_checksums(sh: &Shell) -> Result> { - // Find bootc*.rpm files in target/packages/ - let packages_dir = Utf8Path::new("target/packages"); - let mut rpm_files: Vec = Vec::new(); - for entry in 
std::fs::read_dir(packages_dir).context("Reading target/packages")? { - let entry = entry?; - let path = Utf8PathBuf::try_from(entry.path())?; - if path.extension() == Some("rpm") { - rpm_files.push(path); - } - } - - assert!(!rpm_files.is_empty()); - - let mut checksums = BTreeMap::new(); - for rpm_path in &rpm_files { - let output = cmd!(sh, "sha256sum {rpm_path}").read()?; - let (hash, filename) = output - .split_once(" ") - .with_context(|| format!("failed to parse sha256sum output: '{}'", output))?; - checksums.insert(filename.to_owned(), hash.to_owned()); - } - Ok(checksums) - } - - cmd!(sh, "just package").run()?; - let first_checksums = get_rpm_checksums(sh)?; - cmd!(sh, "just package").run()?; - let second_checksums = get_rpm_checksums(sh)?; - - itertools::assert_equal(first_checksums, second_checksums); - println!("ok package reproducibility"); - - Ok(()) -} From d220d98b7260a5efbfff47e258b2049045be2aac Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Dec 2025 11:01:36 -0500 Subject: [PATCH 09/10] tests: Fix incorrect prune Now that we're building a from-scratch image it won't have `/ostree` in it; this line was always pruning the wrong repo. Signed-off-by: Colin Walters --- crates/ostree-ext/ci/priv-integration.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/ostree-ext/ci/priv-integration.sh b/crates/ostree-ext/ci/priv-integration.sh index aa1d588f6..2a6acea7d 100755 --- a/crates/ostree-ext/ci/priv-integration.sh +++ b/crates/ostree-ext/ci/priv-integration.sh @@ -8,6 +8,7 @@ set -euo pipefail mkdir -p /var/tmp sysroot=/run/host +repo="${sysroot}/ostree/repo" # Current stable image fixture image=quay.io/fedora/fedora-coreos:testing-devel imgref=ostree-unverified-registry:${image} @@ -111,10 +112,6 @@ derived_img_dir=dir:/var/tmp/derived.dir systemd-run -dP --wait skopeo copy containers-storage:localhost/fcos-derived "${derived_img}" systemd-run -dP --wait skopeo copy "${derived_img}" "${derived_img_dir}" -# Prune to reset state -ostree refs ostree/container/image --delete - -repo="${sysroot}/ostree/repo" images=$(ostree container image list --repo "${repo}" | wc -l) test "${images}" -eq 1 ostree container image deploy --sysroot "${sysroot}" \ From 104799c4dba4ab0eeab8824f5fc78b58316e9c32 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Dec 2025 11:27:24 -0500 Subject: [PATCH 10/10] build-sys: Simplify build recipes and add BOOTC_SKIP_PACKAGE Remove the separate build-from-packages and _build-from-package helper recipes. The build logic is now inlined directly in the build recipe. Add BOOTC_SKIP_PACKAGE=1 environment variable support to skip the package build step when packages are provided externally (e.g. from CI artifacts). This is used in ci.yml for the test-integration job. Assisted-by: OpenCode (Sonnet 4) Signed-off-by: Colin Walters --- .github/workflows/ci.yml | 2 +- Justfile | 57 +++++++++++++++++++--------------------- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 46712ae98..b0f720fc7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -189,7 +189,7 @@ jobs: - name: Build container run: | - just build-from-packages target/packages + BOOTC_SKIP_PACKAGE=1 just build # Extra cross-check (duplicating the integration test) that we're using the right base used_vid=$(podman run --rm localhost/bootc bash -c '. 
/usr/lib/os-release && echo ${ID}-${VERSION_ID}') test ${{ matrix.test_os }} = "${used_vid}" diff --git a/Justfile b/Justfile index ff520a93b..6bf6edc9e 100644 --- a/Justfile +++ b/Justfile @@ -52,17 +52,18 @@ sealed_buildargs := "--build-arg=variant=" + variant + " --secret=id=secureboot_ # The default target: build the container image from current sources. # Note commonly you might want to override the base image via e.g. # `just build --build-arg=base=quay.io/fedora/fedora-bootc:42` -# -# This first builds RPMs via the `package` target, then injects them # into the container image. +# +# Note you can set `BOOTC_SKIP_PACKAGE=1` in the environment to bypass this stage. build: package _keygen && _pull-lbi-images - @just _build-from-package target/packages - -# Build container image using pre-existing packages from PATH. -# This skips the package build step - useful when packages are provided -# externally (e.g. downloaded from CI artifacts). -build-from-packages PATH: _keygen && _pull-lbi-images - @just _build-from-package {{PATH}} + #!/bin/bash + set -xeuo pipefail + test -d target/packages + # Resolve to absolute path for podman volume mount + # Use :z for SELinux relabeling + pkg_path=$(realpath target/packages) + podman build --target=final -v "${pkg_path}":/run/packages:ro,z -t {{base_img}}-bin {{buildargs}} . + ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} # Pull images used by hack/lbi _pull-lbi-images: @@ -91,36 +92,32 @@ _git-build-vars: _keygen: ./hack/generate-secureboot-keys -# Internal helper: build container image from packages at PATH -_build-from-package PATH: - #!/bin/bash - set -xeuo pipefail - # Resolve to absolute path for podman volume mount - # Use :z for SELinux relabeling - pkg_path=$(realpath "{{PATH}}") - podman build --target=final -v "${pkg_path}":/run/packages:ro,z -t {{base_img}}-bin {{buildargs}} . - ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} - # Build a sealed image from current sources. build-sealed: @just --justfile {{justfile()}} variant=composefs-sealeduki-sdboot build -# Build packages (e.g. RPM) using a container buildroot -_packagecontainer: +# Build packages (e.g. RPM) into target/packages/ +# Any old packages will be removed. +# Set BOOTC_SKIP_PACKAGE=1 in the environment to bypass this stage. We don't +# yet have an accurate ability to avoid rebuilding this in CI yet. +package: #!/bin/bash set -xeuo pipefail + packages=target/packages + if test -n "${BOOTC_SKIP_PACKAGE:-}"; then + if test '!' -d "${packages}"; then + echo "BOOTC_SKIP_PACKAGE is set, but missing ${packages}" 1>&2; exit 1 + fi + exit 0 + fi eval $(just _git-build-vars) echo "Building RPM with version: ${VERSION}" podman build {{base_buildargs}} --build-arg=SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} --build-arg=pkgversion=${VERSION} -t localhost/bootc-pkg --target=build . - -# Build packages (e.g. RPM) into target/packages/ -# Any old packages will be removed. -package: _packagecontainer - mkdir -p target/packages - rm -vf target/packages/*.rpm - podman run --rm localhost/bootc-pkg tar -C /out/ -cf - . | tar -C target/packages/ -xvf - - chmod a+rx target target/packages - chmod a+r target/packages/*.rpm + mkdir -p "${packages}" + rm -vf "${packages}"/*.rpm + podman run --rm localhost/bootc-pkg tar -C /out/ -cf - . 
| tar -C "${packages}"/ -xvf - + chmod a+rx target "${packages}" + chmod a+r "${packages}"/*.rpm # Keep localhost/bootc-pkg for layer caching; use `just clean-local-images` to reclaim space # Build+test using the `composefs-sealeduki-sdboot` variant.
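
Taken together, the series leaves `just build` as the single entry point for local and CI builds alike. Below is a short sketch of the resulting workflow; the commands are drawn from the Justfile comments and the ci.yml changes above, and the specific values (`target/packages/`, `quay.io/fedora/fedora-bootc:42`) are the examples used there rather than requirements:

```shell
# Default local build: builds the RPMs via the `package` recipe, then injects
# them into the container image and produces the sealed variant.
just build

# Override the base image (example value from the Justfile comment).
just build --build-arg=base=quay.io/fedora/fedora-bootc:42

# CI-style build: reuse RPMs already extracted into target/packages/ and skip
# rebuilding the package stage.
BOOTC_SKIP_PACKAGE=1 just build

# Containerized unit and integration tests against the resulting image.
just test-container
```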