diff --git a/.github/workflows/build-pr.yml b/.github/workflows/build-pr.yml deleted file mode 100644 index 3768205d..00000000 --- a/.github/workflows/build-pr.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: build-pr - -on: - pull_request: - branches: - - main - -jobs: - run: - runs-on: ubuntu-latest - - strategy: - matrix: - go: ['1.21', '1.22'] - - steps: - - name: checkout source code - uses: actions/checkout@v4 - - - name: setup go environment - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go }} - - - name: run tests - id: tests - run: | - export PATH="$(go env GOPATH)/bin:${PATH}" - make install.tools - make .gitvalidation - make docs conformance - - set +e - make registry-ci conformance-ci - CONFORMANCE_RC="$?" - set -e - if [[ -f report.html ]]; then - echo "Found report.html." - echo "has-report=true" >> $GITHUB_OUTPUT - fi - echo "Conformance return code: ${CONFORMANCE_RC}" - exit ${CONFORMANCE_RC} - - - name: Upload OCI conformance results as build artifact - if: always() && steps.tests.outputs.has-report == 'true' - uses: actions/upload-artifact@v4 - with: - name: oci-conformance-results-${{ matrix.go }} - path: | - ./report.html - ./junit.xml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9cfb560c..07681bc8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -4,6 +4,9 @@ on: push: branches: - main + pull_request: + branches: + - main jobs: run: @@ -11,7 +14,7 @@ jobs: strategy: matrix: - go: ['1.21', '1.22'] + go: ['stable', 'oldstable'] steps: - name: checkout source code @@ -47,5 +50,6 @@ jobs: with: name: oci-conformance-results-${{ matrix.go }} path: | + ./results.yaml ./report.html ./junit.xml diff --git a/.github/workflows/conformance-action-pr.yml b/.github/workflows/conformance-action-pr.yml deleted file mode 100644 index cb8ac26c..00000000 --- a/.github/workflows/conformance-action-pr.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: conformance-action-pr - -on: - pull_request: - branches: 
- - main - -jobs: - run: - runs-on: ubuntu-latest - steps: - - name: checkout source code - uses: actions/checkout@v4 - - name: Start a test registry (zot) - run: | - set -x - make registry-ci - - name: Run OCI distribution-spec conformance - env: - OCI_ROOT_URL: http://localhost:5000 - OCI_NAMESPACE: myorg/myrepo - OCI_TEST_PULL: 1 - OCI_TEST_PUSH: 1 - OCI_TEST_CONTENT_DISCOVERY: 1 - OCI_TEST_CONTENT_MANAGEMENT: 1 - uses: ./ diff --git a/.github/workflows/conformance-action.yml b/.github/workflows/conformance-action.yml index 23a76fbf..aaffbf2d 100644 --- a/.github/workflows/conformance-action.yml +++ b/.github/workflows/conformance-action.yml @@ -4,6 +4,9 @@ on: push: branches: - main + pull_request: + branches: + - main jobs: run: @@ -11,16 +14,17 @@ jobs: steps: - name: checkout source code uses: actions/checkout@v4 - - name: Start a test registry (zot) + - name: Start a test registry + id: setup run: | set -x make registry-ci + echo "port=$(docker port oci-conformance-olareg 5000| head -1 | cut -f2 -d:)" >>$GITHUB_OUTPUT - name: Run OCI distribution-spec conformance env: - OCI_ROOT_URL: http://localhost:5000 - OCI_NAMESPACE: myorg/myrepo - OCI_TEST_PULL: 1 - OCI_TEST_PUSH: 1 - OCI_TEST_CONTENT_DISCOVERY: 1 - OCI_TEST_CONTENT_MANAGEMENT: 1 + OCI_REGISTRY: "localhost:${{ steps.setup.outputs.port }}" + OCI_TLS: "disabled" + OCI_REPO1: "myorg/myrepo" + OCI_REPO2: "myorg/myrepo2" + OCI_RESULTS_DIR: "." 
uses: ./ diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 83650358..c90b8393 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -26,10 +26,6 @@ jobs: fi VERSION=$(echo "${VERSION}" | sed -r 's#/+#-#g') TAGS="${IMAGE}:${VERSION}" - if [[ $VERSION == "${{ github.event.repository.default_branch }}" ]]; then - GITSHA="$(git rev-parse --short HEAD)" - TAGS="${TAGS},${IMAGE}:${GITSHA}" - fi if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then MINOR=${VERSION%.*} MAJOR=${MINOR%.*} diff --git a/.gitignore b/.gitignore index 33b56263..173985d0 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ tags go.work junit.xml report.html +results.yaml diff --git a/Makefile b/Makefile index 87ce0eb4..1d71d3cf 100644 --- a/Makefile +++ b/Makefile @@ -23,14 +23,13 @@ ifeq "$(strip $(PANDOC))" '' endif endif -GOLANGCILINT_CONTAINER ?= ghcr.io/opencontainers/golangci-lint:v1.52.1@sha256:d3d3d56f9706ebe843c1b06686c385877ba65b33f39507cdbeb22f482adce65a +GOLANGCILINT_CONTAINER ?= docker.io/golangci/golangci-lint:v2.11.3@sha256:e838e8ab68aaefe83e2408691510867ade9329c0e0b895a3fb35eb93d1c2a4ba ifeq "$(strip $(GOLANGCILINT))" '' ifneq "$(strip $(DOCKER))" '' GOLANGCILINT = $(DOCKER) run \ --rm \ -v $(shell pwd)/:/input:ro \ -e GOCACHE=/tmp/.cache \ - -e GO111MODULE=on \ -e GOLANGCI_LINT_CACHE=/tmp/.cache \ --entrypoint /bin/bash \ -u $(shell id -u) \ @@ -83,35 +82,70 @@ install.tools: .install.gitvalidation .install.gitvalidation: go install github.com/vbatts/git-validation@latest -conformance: conformance-test conformance-binary +conformance: conformance-test conformance-cmd conformance-test: $(GOLANGCILINT) -c 'cd conformance && golangci-lint run -v' conformance-binary: $(OUTPUT_DIRNAME)/conformance.test -TEST_REGISTRY_CONTAINER ?= ghcr.io/project-zot/zot-minimal-linux-amd64:v2.1.7@sha256:2114797f00696011f38cc94c72f5773c84b1036562df5034d05ea19075179ad1 -registry-ci: - docker rm -f oci-conformance && \ 
+conformance-cmd: $(OUTPUT_DIRNAME)/conformance + +registry-ci: registry-ci-olareg + +TEST_REGISTRY_IMAGE_OLAREG ?= ghcr.io/olareg/olareg:edge +registry-ci-olareg: + docker rm -f oci-conformance-olareg && \ + docker run --rm -d \ + --name=oci-conformance-olareg \ + -p 5000 \ + $(TEST_REGISTRY_IMAGE_OLAREG) serve --store-type mem --api-delete --api-blob-delete --api-sparse-image --api-sparse-index && \ + sleep 2 + +TEST_REGISTRY_IMAGE_ZOT ?= ghcr.io/project-zot/zot-minimal-linux-amd64:v2.1.7@sha256:2114797f00696011f38cc94c72f5773c84b1036562df5034d05ea19075179ad1 +registry-ci-zot: + docker rm -f oci-conformance-zot && \ mkdir -p $(OUTPUT_DIRNAME) && \ echo '{"distSpecVersion":"1.1.0-dev","storage":{"rootDirectory":"/tmp/zot","gc":false,"dedupe":false},"http":{"address":"0.0.0.0","port":"5000"},"log":{"level":"debug"}}' > $(shell pwd)/$(OUTPUT_DIRNAME)/zot-config.json - docker run -d \ + docker run --rm -d \ -v $(shell pwd)/$(OUTPUT_DIRNAME)/zot-config.json:/etc/zot/config.json \ - --name=oci-conformance \ - -p 5000:5000 \ - $(TEST_REGISTRY_CONTAINER) && \ + --name=oci-conformance-zot \ + -p 5000 \ + $(TEST_REGISTRY_IMAGE_ZOT) && \ sleep 5 -conformance-ci: - export OCI_ROOT_URL="http://localhost:5000" && \ - export OCI_NAMESPACE="myorg/myrepo" && \ - export OCI_TEST_PULL=1 && \ - export OCI_TEST_PUSH=1 && \ - export OCI_TEST_CONTENT_DISCOVERY=1 && \ - export OCI_TEST_CONTENT_MANAGEMENT=1 && \ - $(shell pwd)/$(OUTPUT_DIRNAME)/conformance.test +conformance-ci: conformance-ci-olareg + +conformance-ci-olareg: $(OUTPUT_DIRNAME)/conformance + export OCI_VERSION="dev" && \ + export OCI_REGISTRY="localhost:$$(docker port oci-conformance-olareg 5000| head -1 | cut -f2 -d:)" && \ + export OCI_TLS="disabled" && \ + export OCI_REPO1="myorg/myrepo" && \ + export OCI_REPO2="myorg/myrepo2" && \ + export OCI_RESULTS_DIR="." 
&& \ + export OCI_DATA_SPARSE=true && \ + $(shell pwd)/$(OUTPUT_DIRNAME)/conformance + +conformance-ci-zot: $(OUTPUT_DIRNAME)/conformance + export OCI_REGISTRY="localhost:$$(docker port oci-conformance-zot 5000| head -1 | cut -f2 -d:)" && \ + export OCI_TLS="disabled" && \ + export OCI_REPO1="myorg/myrepo" && \ + export OCI_REPO2="myorg/myrepo2" && \ + export OCI_RESULTS_DIR="." && \ + $(shell pwd)/$(OUTPUT_DIRNAME)/conformance + +clean-ci: + docker rm -f oci-conformance-olareg oci-conformance-zot + +$(OUTPUT_DIRNAME)/conformance: conformance/*.go conformance/go.mod + cd conformance && \ + CGO_ENABLED=0 go build -o $(shell pwd)/$(OUTPUT_DIRNAME)/conformance \ + --ldflags="-X github.com/opencontainers/distribution-spec/conformance.Version=$(CONFORMANCE_VERSION)" -$(OUTPUT_DIRNAME)/conformance.test: +$(OUTPUT_DIRNAME)/conformance.test: conformance/*.go conformance/go.mod cd conformance && \ CGO_ENABLED=0 go test -c -o $(shell pwd)/$(OUTPUT_DIRNAME)/conformance.test \ --ldflags="-X github.com/opencontainers/distribution-spec/conformance.Version=$(CONFORMANCE_VERSION)" + +clean: clean-ci + rm -rf header.html junit.xml report.html results.yaml output conformance/results diff --git a/action.yml b/action.yml index eed1a577..b66d43fa 100644 --- a/action.yml +++ b/action.yml @@ -14,16 +14,39 @@ runs: id: check-conformance run: | set -x + required_env_vars=( + "OCI_REGISTRY" + "OCI_REPO1" + ) + missing=false + for v in ${required_env_vars[@]}; do + if [[ "${!v}" == "" ]]; then + echo "::error title=Missing variable::Missing required variable ${v}" + missing=true + fi + done + if [[ "${missing}" == "false" ]]; then + exit 0 + fi + # fallback to previous variables required_env_vars=( "OCI_ROOT_URL" "OCI_NAMESPACE" ) + missing=false for v in ${required_env_vars[@]}; do if [[ "${!v}" == "" ]]; then - echo "Error: the following environment variable is required: ${v}" - exit 1 + echo "::error title=Missing fallback variable::Missing fallback variable ${v}" + missing=true + else + 
echo "::warning title=Deprecated fallback variable::Variable ${v} has been deprecated" fi done + if [[ "${missing}" == "false" ]]; then + exit 0 + else + exit 1 + fi - name: Build OCI distribution-spec conformance binary shell: bash @@ -34,14 +57,16 @@ runs: # Enter the directory containing the checkout of this action which is surpisingly hard to do (but we did it... #OCI) cd "$(dirname $(find $(find ~/work/_actions -name distribution-spec -print -quit) -name Makefile -print -quit))" - # The .git folder is not present, but the dirname is the requested action ref, so use this as the conformance version - conformance_version="$(basename "${PWD}")" - echo "conformance-version=${conformance_version}" >> $GITHUB_OUTPUT + # The .git folder is not present, but the dirname is the requested action ref, so use this as the commit version + commit_version="$(basename "${PWD}")" # Build the conformance binary - CONFORMANCE_VERSION="${conformance_version}" OUTPUT_DIRNAME=bin make conformance-binary + CONFORMANCE_VERSION="${commit_version}" OUTPUT_DIRNAME=bin make conformance-cmd + + # The spec version is independent of the conformance git commit + echo "oci-spec-version=${OCI_VERSION:-stable}" >> $GITHUB_OUTPUT - # Add bin to the PATH so we can just run "conformance.test" + # Add bin to the PATH so we can just run "conformance" echo "${PWD}/bin" >> $GITHUB_PATH - name: Run OCI distribution-spec conformance binary @@ -50,7 +75,8 @@ runs: run: | set -x set +e - conformance.test + OCI_RESULTS_DIR=${OCI_RESULTS_DIR:-.} + conformance conformance_rc="$?" 
set -e if [[ -f report.html ]]; then @@ -62,9 +88,10 @@ runs: - name: Upload OCI distribution-spec conformance results as build artifact if: always() && steps.run-conformance.outputs.has-report == 'true' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: - name: oci-distribution-spec-conformance-results-${{ steps.build-conformance.outputs.conformance-version }} + name: oci-distribution-spec-conformance-results-${{ steps.build-conformance.outputs.oci-spec-version }} path: | + results.yaml report.html junit.xml diff --git a/conformance/.gitignore b/conformance/.gitignore index f5a764ee..5cd8f655 100644 --- a/conformance/.gitignore +++ b/conformance/.gitignore @@ -1,6 +1,5 @@ -vendor/ -junit.xml -report.html +conformance conformance.test -tags -env.sh +oci-conformance.yaml +results/** +vendor/ diff --git a/conformance/00_conformance_suite_test.go b/conformance/00_conformance_suite_test.go deleted file mode 100644 index 157f4d99..00000000 --- a/conformance/00_conformance_suite_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - "log" - "testing" - - g "github.com/onsi/ginkgo/v2" - "github.com/onsi/ginkgo/v2/reporters" - . 
"github.com/onsi/gomega" -) - -func TestConformance(t *testing.T) { - g.Describe(suiteDescription, func() { - test01Pull() - test02Push() - test03ContentDiscovery() - test04ContentManagement() - }) - - RegisterFailHandler(g.Fail) - suiteConfig, reporterConfig := g.GinkgoConfiguration() - hr := newHTMLReporter(reportHTMLFilename) - g.ReportAfterEach(hr.afterReport) - g.ReportAfterSuite("html custom reporter", func(r g.Report) { - if err := hr.endSuite(r); err != nil { - log.Printf("\nWARNING: cannot write HTML summary report: %v", err) - } - }) - g.ReportAfterSuite("junit custom reporter", func(r g.Report) { - if reportJUnitFilename != "" { - _ = reporters.GenerateJUnitReportWithConfig(r, reportJUnitFilename, reporters.JunitReportConfig{ - OmitLeafNodeType: true, - }) - } - }) - g.RunSpecs(t, "conformance tests", suiteConfig, reporterConfig) -} diff --git a/conformance/01_pull_test.go b/conformance/01_pull_test.go deleted file mode 100644 index c7c84a44..00000000 --- a/conformance/01_pull_test.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - "net/http" - "os" - - "github.com/bloodorangeio/reggie" - g "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var test01Pull = func() { - g.Context(titlePull, func() { - - var tag string - - g.Context("Setup", func() { - g.Specify("Populate registry with test blob", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", configs[0].Digest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", configs[0].ContentLength). - SetBody(configs[0].Content) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test blob", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", configs[1].Digest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", configs[1].ContentLength). - SetBody(configs[1].Content) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test layer", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", layerBlobDigest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", layerBlobContentLength). 
- SetBody(layerBlobData) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test manifest", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - tag = testTagName - req := client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(tag)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(manifests[0].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test manifest", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(manifests[1].Digest)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(manifests[1].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Get tag name from environment", func() { - SkipIfDisabled(pull) - RunOnlyIfNot(runPullSetup) - tmp := os.Getenv(envVarTagName) - if tmp != "" { - tag = tmp - } - }) - }) - - g.Context("Pull blobs", func() { - g.Specify("HEAD request to nonexistent blob should result in 404 response", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.HEAD, "/v2//blobs/", - reggie.WithDigest(dummyDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - - g.Specify("HEAD request to existing blob should yield 200", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.HEAD, "/v2//blobs/", - reggie.WithDigest(configs[0].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - if 
h := resp.Header().Get("Docker-Content-Digest"); h != "" { - Expect(h).To(Equal(configs[0].Digest)) - } - }) - - g.Specify("GET nonexistent blob should result in 404 response", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.GET, "/v2//blobs/", - reggie.WithDigest(dummyDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - - g.Specify("GET request to existing blob URL should yield 200", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.GET, "/v2//blobs/", reggie.WithDigest(configs[0].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - }) - - g.Context("Pull manifests", func() { - g.Specify("HEAD request to nonexistent manifest should return 404", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.HEAD, "/v2//manifests/", - reggie.WithReference(nonexistentManifest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - - g.Specify("HEAD request to manifest[0] path (digest) should yield 200 response", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.HEAD, "/v2//manifests/", reggie.WithDigest(manifests[0].Digest)). - SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - if h := resp.Header().Get("Docker-Content-Digest"); h != "" { - Expect(h).To(Equal(manifests[0].Digest)) - } - }) - - g.Specify("HEAD request to manifest[1] path (digest) should yield 200 response", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.HEAD, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)). 
- SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - if h := resp.Header().Get("Docker-Content-Digest"); h != "" { - Expect(h).To(Equal(manifests[1].Digest)) - } - }) - - g.Specify("HEAD request to manifest path (tag) should yield 200 response", func() { - SkipIfDisabled(pull) - Expect(tag).ToNot(BeEmpty()) - req := client.NewRequest(reggie.HEAD, "/v2//manifests/", reggie.WithReference(tag)). - SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - if h := resp.Header().Get("Docker-Content-Digest"); h != "" { - Expect(h).To(Equal(manifests[0].Digest)) - } - }) - - g.Specify("GET nonexistent manifest should return 404", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.GET, "/v2//manifests/", - reggie.WithReference(nonexistentManifest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - - g.Specify("GET request to manifest[0] path (digest) should yield 200 response", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.GET, "/v2//manifests/", reggie.WithDigest(manifests[0].Digest)). - SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - - g.Specify("GET request to manifest[1] path (digest) should yield 200 response", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.GET, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)). 
- SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - - g.Specify("GET request to manifest path (tag) should yield 200 response", func() { - SkipIfDisabled(pull) - Expect(tag).ToNot(BeEmpty()) - req := client.NewRequest(reggie.GET, "/v2//manifests/", reggie.WithReference(tag)). - SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - }) - - g.Context("Error codes", func() { - g.Specify("400 response body should contain OCI-conforming JSON message", func() { - SkipIfDisabled(pull) - req := client.NewRequest(reggie.GET, "/v2//manifests/", - reggie.WithReference("sha256:totallywrong")). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(invalidManifestContent) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusBadRequest), - Equal(http.StatusNotFound))) - if resp.StatusCode() == http.StatusBadRequest { - errorResponses, err := resp.Errors() - Expect(err).To(BeNil()) - - Expect(errorResponses).ToNot(BeEmpty()) - Expect(errorCodes).To(ContainElement(errorResponses[0].Code)) - } - }) - }) - - g.Context("Teardown", func() { - if deleteManifestBeforeBlobs { - g.Specify("Delete manifest[0] created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[0].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - }) - g.Specify("Delete manifest[1] created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := 
client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - }) - } - - g.Specify("Delete config[0] blob created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(configs[0].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - g.Specify("Delete config[1] blob created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(configs[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - - g.Specify("Delete layer blob created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(layerBlobDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - - if !deleteManifestBeforeBlobs { - g.Specify("Delete manifest[0] created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[0].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - 
Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - }) - g.Specify("Delete manifest[1] created in setup", func() { - SkipIfDisabled(pull) - RunOnlyIf(runPullSetup) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - }) - } - }) - }) -} diff --git a/conformance/02_push_test.go b/conformance/02_push_test.go deleted file mode 100644 index 65ce7875..00000000 --- a/conformance/02_push_test.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - "fmt" - "net/http" - - "strconv" - - "github.com/bloodorangeio/reggie" - g "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var test02Push = func() { - g.Context(titlePush, func() { - - var lastResponse, prevResponse *reggie.Response - var emptyLayerManifestRef string - - g.Context("Setup", func() { - // No setup required at this time for push tests - }) - - g.Context("Blob Upload Streamed", func() { - g.Specify("PATCH request with blob in body should yield 202 response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - - req = client.NewRequest(reggie.PATCH, resp.GetRelativeLocation()). - SetHeader("Content-Type", "application/octet-stream"). - SetBody(testBlobA) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusAccepted)) - lastResponse = resp - }) - - g.Specify("PUT request to session URL with digest should yield 201 response", func() { - SkipIfDisabled(push) - Expect(lastResponse).ToNot(BeNil()) - req := client.NewRequest(reggie.PUT, lastResponse.GetRelativeLocation()). - SetQueryParam("digest", testBlobADigest). - SetHeader("Content-Type", "application/octet-stream"). 
- SetHeader("Content-Length", testBlobALength) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - }) - }) - - g.Context("Blob Upload Monolithic", func() { - g.Specify("GET nonexistent blob should result in 404 response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.GET, "/v2//blobs/", - reggie.WithDigest(dummyDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - - g.Specify("POST request with digest and blob should yield a 201 or 202", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/"). - SetHeader("Content-Length", configs[1].ContentLength). - SetHeader("Content-Type", "application/octet-stream"). - SetQueryParam("digest", configs[1].Digest). - SetBody(configs[1].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusCreated), - Equal(http.StatusAccepted), - )) - lastResponse = resp - }) - - g.Specify("GET request to blob URL from prior request should yield 200 or 404 based on response code", func() { - SkipIfDisabled(push) - Expect(lastResponse).ToNot(BeNil()) - req := client.NewRequest(reggie.GET, "/v2//blobs/", reggie.WithDigest(configs[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - if lastResponse.StatusCode() == http.StatusAccepted { - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - } else { - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - } - }) - - g.Specify("POST request should yield a session ID", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - 
Expect(resp.StatusCode()).To(Equal(http.StatusAccepted)) - lastResponse = resp - }) - - g.Specify("PUT upload of a blob should yield a 201 Response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.PUT, lastResponse.GetRelativeLocation()). - SetHeader("Content-Length", configs[1].ContentLength). - SetHeader("Content-Type", "application/octet-stream"). - SetQueryParam("digest", configs[1].Digest). - SetBody(configs[1].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - }) - - g.Specify("GET request to existing blob should yield 200 response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.GET, "/v2//blobs/", reggie.WithDigest(configs[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - - g.Specify("PUT upload of a layer blob should yield a 201 Response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetHeader("Content-Length", layerBlobContentLength). - SetHeader("Content-Type", "application/octet-stream"). - SetQueryParam("digest", layerBlobDigest). 
- SetBody(layerBlobData) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - }) - - g.Specify("GET request to existing layer should yield 200 response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.GET, "/v2//blobs/", reggie.WithDigest(layerBlobDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - }) - - g.Context("Blob Upload Chunked", func() { - g.Specify("Out-of-order blob upload should return 416", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/"). - SetHeader("Content-Length", "0") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - - // rebuild chunked blob if min size is above our chunk size - minSizeStr := resp.Header().Get("OCI-Chunk-Min-Length") - if minSizeStr != "" { - minSize, err := strconv.Atoi(minSizeStr) - Expect(err).To(BeNil()) - if minSize > len(testBlobBChunk1) { - setupChunkedBlob(minSize*2 - 2) - } - } - - req = client.NewRequest(reggie.PATCH, resp.GetRelativeLocation()). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", testBlobBChunk2Length). - SetHeader("Content-Range", testBlobBChunk2Range). - SetBody(testBlobBChunk2) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusRequestedRangeNotSatisfiable)) - }) - - g.Specify("PATCH request with first chunk should return 202", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/"). 
- SetHeader("Content-Length", "0") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - prevResponse = resp - req = client.NewRequest(reggie.PATCH, resp.GetRelativeLocation()). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", testBlobBChunk1Length). - SetHeader("Content-Range", testBlobBChunk1Range). - SetBody(testBlobBChunk1) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusAccepted)) - Expect(resp.Header().Get("Range")).To(Equal(testBlobBChunk1Range)) - lastResponse = resp - }) - - g.Specify("Retry previous blob chunk should return 416", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.PATCH, prevResponse.GetRelativeLocation()). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", testBlobBChunk1Length). - SetHeader("Content-Range", testBlobBChunk1Range). - SetBody(testBlobBChunk1) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusRequestedRangeNotSatisfiable)) - }) - - g.Specify("Get on stale blob upload should return 204 with a range and location", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.GET, prevResponse.GetRelativeLocation()) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNoContent)) - Expect(resp.Header().Get("Location")).ToNot(BeEmpty()) - Expect(resp.Header().Get("Range")).To(Equal(testBlobBChunk1Range)) - lastResponse = resp - }) - - g.Specify("PATCH request with second chunk should return 202", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.PATCH, lastResponse.GetRelativeLocation()). - SetHeader("Content-Length", testBlobBChunk2Length). - SetHeader("Content-Range", testBlobBChunk2Range). - SetHeader("Content-Type", "application/octet-stream"). 
- SetBody(testBlobBChunk2) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(resp.StatusCode()).To(Equal(http.StatusAccepted)) - Expect(resp.Header().Get("Range")).To(Equal(fmt.Sprintf("0-%d", len(testBlobB)-1))) - Expect(location).ToNot(BeEmpty()) - lastResponse = resp - }) - - g.Specify("PUT request with digest should return 201", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.PUT, lastResponse.GetRelativeLocation()). - SetHeader("Content-Length", "0"). - SetHeader("Content-Type", "application/octet-stream"). - SetQueryParam("digest", testBlobBDigest) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - }) - }) - - g.Context("Cross-Repository Blob Mount", func() { - g.Specify("Cross-mounting of a blob without the from argument should yield session id", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/", - reggie.WithName(crossmountNamespace)). - SetQueryParam("mount", dummyDigest) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusAccepted)) - Expect(resp.GetAbsoluteLocation()).To(Not(BeEmpty())) - }) - - g.Specify("POST request to mount another repository's blob should return 201 or 202", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/", - reggie.WithName(crossmountNamespace)). - SetQueryParam("mount", testBlobADigest). 
- SetQueryParam("from", client.Config.DefaultName) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusCreated), - Equal(http.StatusAccepted), - )) - lastResponse = resp - }) - - g.Specify("GET request to test digest within cross-mount namespace should return 200", func() { - SkipIfDisabled(push) - RunOnlyIf(lastResponse.StatusCode() == http.StatusCreated) - Expect(lastResponse.GetRelativeLocation()).To(Equal(fmt.Sprintf("/v2/%s/blobs/%s", crossmountNamespace, testBlobADigest))) - req := client.NewRequest(reggie.GET, lastResponse.GetRelativeLocation()) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - - g.Specify("Cross-mounting of nonexistent blob should yield session id", func() { - SkipIfDisabled(push) - RunOnlyIf(lastResponse.StatusCode() == http.StatusAccepted) - Expect(lastResponse.GetRelativeLocation()).To(HavePrefix(fmt.Sprintf("/v2/%s/blobs/uploads/", crossmountNamespace))) - }) - - g.Specify("Cross-mounting without from, and automatic content discovery enabled should return a 201", func() { - SkipIfDisabled(push) - RunOnlyIf(runAutomaticCrossmountTest) - RunOnlyIf(lastResponse.StatusCode() == http.StatusCreated) - RunOnlyIf(automaticCrossmountEnabled) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/", - reggie.WithName(crossmountNamespace)). - SetQueryParam("mount", testBlobADigest) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - }) - - g.Specify("Cross-mounting without from, and automatic content discovery disabled should return a 202", func() { - SkipIfDisabled(push) - RunOnlyIf(runAutomaticCrossmountTest) - RunOnlyIf(lastResponse.StatusCode() == http.StatusCreated) - RunOnlyIfNot(automaticCrossmountEnabled) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/", - reggie.WithName(crossmountNamespace)). 
- SetQueryParam("mount", testBlobADigest) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusAccepted)) - }) - }) - - g.Context("Manifest Upload", func() { - g.Specify("GET nonexistent manifest should return 404", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.GET, "/v2//manifests/", - reggie.WithReference(nonexistentManifest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - - g.Specify("PUT should accept a manifest upload", func() { - SkipIfDisabled(push) - for i := 0; i < 4; i++ { - tag := fmt.Sprintf("test%d", i) - req := client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(tag)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(manifests[1].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - location := resp.Header().Get("Location") - Expect(location).ToNot(BeEmpty()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - } - }) - - g.Specify("Registry should accept a manifest upload with no layers", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(emptyLayerTestTag)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(emptyLayerManifestContent) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - if resp.StatusCode() == http.StatusCreated { - location := resp.Header().Get("Location") - emptyLayerManifestRef = location - Expect(location).ToNot(BeEmpty()) - Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) - } else { - Warn("image manifest with no layers is not supported") - } - }) - - g.Specify("GET request to manifest URL (digest) should yield 200 response", func() { - SkipIfDisabled(push) - req := client.NewRequest(reggie.GET, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)). 
- SetHeader("Accept", "application/vnd.oci.image.manifest.v1+json") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - }) - }) - - g.Context("Teardown", func() { - if deleteManifestBeforeBlobs { - g.Specify("Delete manifest created in tests", func() { - SkipIfDisabled(push) - RunOnlyIf(runPushSetup) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - if emptyLayerManifestRef != "" { - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithReference(emptyLayerManifestDigest)) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - } - }) - } - - g.Specify("Delete config blob created in tests", func() { - SkipIfDisabled(push) - RunOnlyIf(runPushSetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(configs[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - - g.Specify("Delete layer blob created in setup", func() { - SkipIfDisabled(push) - RunOnlyIf(runPushSetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(layerBlobDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - - if !deleteManifestBeforeBlobs { - 
g.Specify("Delete manifest created in tests", func() { - SkipIfDisabled(push) - RunOnlyIf(runPushSetup) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[1].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - if emptyLayerManifestRef != "" { - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithReference(emptyLayerManifestDigest)) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - )) - } - }) - } - }) - }) -} diff --git a/conformance/03_discovery_test.go b/conformance/03_discovery_test.go deleted file mode 100644 index 3ad2fbd5..00000000 --- a/conformance/03_discovery_test.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - "encoding/json" - "fmt" - "net/http" - "os" - "sort" - "strconv" - "strings" - - "github.com/bloodorangeio/reggie" - g "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - godigest "github.com/opencontainers/go-digest" -) - -var test03ContentDiscovery = func() { - g.Context(titleContentDiscovery, func() { - - var numTags = 4 - var tagList []string - - g.Context("Setup", func() { - g.Specify("Populate registry with test blob", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", configs[2].Digest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", configs[2].ContentLength). - SetBody(configs[2].Content) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test layer", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", layerBlobDigest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", layerBlobContentLength). - SetBody(layerBlobData) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test tags", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - for i := 0; i < numTags; i++ { - for _, tag := range []string{"test" + strconv.Itoa(i), "TEST" + strconv.Itoa(i)} { - tagList = append(tagList, tag) - req := client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(tag)). 
- SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(manifests[2].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - } - } - req := client.NewRequest(reggie.GET, "/v2//tags/list") - resp, err := client.Do(req) - tagList = getTagList(resp) - _ = err - }) - - g.Specify("Populate registry with test tags (no push)", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIfNot(runContentDiscoverySetup) - tagList = strings.Split(os.Getenv(envVarTagList), ",") - }) - - g.Specify("References setup", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - - // Populate registry with empty JSON blob - // validate expected empty JSON blob digest - Expect(emptyJSONDescriptor.Digest).To(Equal(godigest.Digest("sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a"))) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", emptyJSONDescriptor.Digest.String()). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", fmt.Sprintf("%d", emptyJSONDescriptor.Size)). - SetBody(emptyJSONBlob) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - - // Populate registry with reference blob before the image manifest is pushed - req = client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err = client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", testRefBlobADigest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", testRefBlobALength). 
- SetBody(testRefBlobA) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - - // Populate registry with test references manifest (config.MediaType = artifactType) - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(refsManifestAConfigArtifactDigest)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(refsManifestAConfigArtifactContent) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - Expect(resp.Header().Get("OCI-Subject")).To(Equal(manifests[4].Digest)) - - // Populate registry with test references manifest (ArtifactType, config.MediaType = emptyJSON) - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(refsManifestALayerArtifactDigest)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(refsManifestALayerArtifactContent) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - Expect(resp.Header().Get("OCI-Subject")).To(Equal(manifests[4].Digest)) - - // Populate registry with test index manifest - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(refsIndexArtifactDigest)). - SetHeader("Content-Type", "application/vnd.oci.image.index.v1+json"). 
- SetBody(refsIndexArtifactContent) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - Expect(resp.Header().Get("OCI-Subject")).To(Equal(manifests[4].Digest)) - - // Populate registry with test blob - req = client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err = client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", configs[4].Digest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", configs[4].ContentLength). - SetBody(configs[4].Content) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - - // Populate registry with test layer - req = client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err = client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", layerBlobDigest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", layerBlobContentLength). - SetBody(layerBlobData) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - - // Populate registry with test manifest - tag := testTagName - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(tag)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). 
- SetBody(manifests[4].Content) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - - // Populate registry with reference blob after the image manifest is pushed - req = client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err = client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", testRefBlobBDigest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", testRefBlobBLength). - SetBody(testRefBlobB) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - - // Populate registry with test references manifest (config.MediaType = artifactType) - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(refsManifestBConfigArtifactDigest)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(refsManifestBConfigArtifactContent) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - Expect(resp.Header().Get("OCI-Subject")).To(Equal(manifests[4].Digest)) - - // Populate registry with test references manifest (ArtifactType, config.MediaType = emptyJSON) - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(refsManifestBLayerArtifactDigest)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). 
- SetBody(refsManifestBLayerArtifactContent) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - Expect(resp.Header().Get("OCI-Subject")).To(Equal(manifests[4].Digest)) - - // Populate registry with test references manifest to a non-existent subject - req = client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(refsManifestCLayerArtifactDigest)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(refsManifestCLayerArtifactContent) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - Expect(resp.Header().Get("OCI-Subject")).To(Equal(manifests[3].Digest)) - }) - }) - - g.Context("Test content discovery endpoints (listing tags)", func() { - g.Specify("GET request to list tags should yield 200 response and be in sorted order", func() { - SkipIfDisabled(contentDiscovery) - req := client.NewRequest(reggie.GET, "/v2//tags/list") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - tagList = getTagList(resp) - numTags = len(tagList) - // If the list is not empty, the tags MUST be in lexical order (i.e. case-insensitive alphanumeric order). - sortedTagListLexical := append([]string{}, tagList...) - sort.SliceStable(sortedTagListLexical, func(i, j int) bool { - return strings.ToLower(sortedTagListLexical[i]) < strings.ToLower(sortedTagListLexical[j]) - }) - // Historically, registries have not been lexical, so allow `sort.Strings` to be valid too. - sortedTagListAsciibetical := append([]string{}, tagList...) 
- sort.Strings(sortedTagListAsciibetical) - Expect(tagList).To(Or(Equal(sortedTagListLexical), Equal(sortedTagListAsciibetical))) - }) - - g.Specify("GET number of tags should be limitable by `n` query parameter", func() { - SkipIfDisabled(contentDiscovery) - numResults := numTags / 2 - req := client.NewRequest(reggie.GET, "/v2//tags/list"). - SetQueryParam("n", strconv.Itoa(numResults)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - tagList = getTagList(resp) - Expect(len(tagList)).To(Equal(numResults)) - }) - - g.Specify("GET start of tag is set by `last` query parameter", func() { - SkipIfDisabled(contentDiscovery) - numResults := numTags / 2 - req := client.NewRequest(reggie.GET, "/v2//tags/list"). - SetQueryParam("n", strconv.Itoa(numResults)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - tagList = getTagList(resp) - last := tagList[numResults-1] - req = client.NewRequest(reggie.GET, "/v2//tags/list"). - SetQueryParam("n", strconv.Itoa(numResults)). 
- SetQueryParam("last", tagList[numResults-1]) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - tagList = getTagList(resp) - Expect(len(tagList)).To(BeNumerically("<=", numResults)) - Expect(tagList).ToNot(ContainElement(last)) - }) - }) - - g.Context("Test content discovery endpoints (listing references)", func() { - g.Specify("GET request to nonexistent blob should result in empty 200 response", func() { - SkipIfDisabled(contentDiscovery) - req := client.NewRequest(reggie.GET, "/v2//referrers/", - reggie.WithDigest(dummyDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - Expect(resp.Header().Get("Content-Type")).To(Equal("application/vnd.oci.image.index.v1+json")) - - var index index - err = json.Unmarshal(resp.Body(), &index) - Expect(err).To(BeNil()) - Expect(len(index.Manifests)).To(BeZero()) - }) - - g.Specify("GET request to existing blob should yield 200", func() { - SkipIfDisabled(contentDiscovery) - req := client.NewRequest(reggie.GET, "/v2//referrers/", - reggie.WithDigest(manifests[4].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - Expect(resp.Header().Get("Content-Type")).To(Equal("application/vnd.oci.image.index.v1+json")) - - var index index - err = json.Unmarshal(resp.Body(), &index) - Expect(err).To(BeNil()) - Expect(len(index.Manifests)).To(Equal(5)) - Expect(index.Manifests[0].Digest).ToNot(Equal(index.Manifests[1].Digest)) - for i := 0; i < len(index.Manifests); i++ { - Expect(len(index.Manifests[i].Annotations)).To(Equal(1)) - Expect(index.Manifests[i].Annotations[testAnnotationKey]).To(Equal(testAnnotationValues[index.Manifests[i].Digest.String()])) - } - }) - - g.Specify("GET request to existing blob with filter should yield 200", func() { - SkipIfDisabled(contentDiscovery) - req := client.NewRequest(reggie.GET, "/v2//referrers/", - 
reggie.WithDigest(manifests[4].Digest)). - SetQueryParam("artifactType", testRefArtifactTypeA) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - Expect(resp.Header().Get("Content-Type")).To(Equal("application/vnd.oci.image.index.v1+json")) - - var index index - err = json.Unmarshal(resp.Body(), &index) - Expect(err).To(BeNil()) - - // also check resp header "OCI-Filters-Applied: artifactType" denoting that an artifactType filter was applied - if resp.Header().Get("OCI-Filters-Applied") != "" { - Expect(len(index.Manifests)).To(Equal(2)) - Expect(resp.Header().Get("OCI-Filters-Applied")).To(Equal(artifactTypeFilter)) - for i := 0; i < len(index.Manifests); i++ { - Expect(len(index.Manifests[i].Annotations)).To(Equal(1)) - Expect(index.Manifests[i].Annotations[testAnnotationKey]).To(Equal(testAnnotationValues[index.Manifests[i].Digest.String()])) - } - } else { - Expect(len(index.Manifests)).To(Equal(5)) - for i := 0; i < len(index.Manifests); i++ { - Expect(len(index.Manifests[i].Annotations)).To(Equal(1)) - Expect(index.Manifests[i].Annotations[testAnnotationKey]).To(Equal(testAnnotationValues[index.Manifests[i].Digest.String()])) - } - Warn("filtering by artifact-type is not implemented") - } - }) - - g.Specify("GET request to missing manifest should yield 200", func() { - SkipIfDisabled(contentDiscovery) - req := client.NewRequest(reggie.GET, "/v2//referrers/", - reggie.WithDigest(manifests[3].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - Expect(resp.Header().Get("Content-Type")).To(Equal("application/vnd.oci.image.index.v1+json")) - - var index index - err = json.Unmarshal(resp.Body(), &index) - Expect(err).To(BeNil()) - Expect(len(index.Manifests)).To(Equal(1)) - Expect(index.Manifests[0].Digest.String()).To(Equal(refsManifestCLayerArtifactDigest)) - }) - }) - - g.Context("Teardown", func() { - if deleteManifestBeforeBlobs { - 
g.Specify("Delete created manifest & associated tags", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - references := []string{ - refsIndexArtifactDigest, - manifests[2].Digest, - manifests[4].Digest, - refsManifestAConfigArtifactDigest, - refsManifestALayerArtifactDigest, - refsManifestBConfigArtifactDigest, - refsManifestBLayerArtifactDigest, - refsManifestCLayerArtifactDigest, - } - for _, ref := range references { - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(ref)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - } - }) - } - - g.Specify("Delete config blob created in tests", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(configs[2].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - - g.Specify("Delete layer blob created in setup", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(layerBlobDigest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - }) - - if !deleteManifestBeforeBlobs { - g.Specify("Delete created manifest & associated tags", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - references := []string{ - refsIndexArtifactDigest, - 
manifests[2].Digest, - manifests[4].Digest, - refsManifestAConfigArtifactDigest, - refsManifestALayerArtifactDigest, - refsManifestBConfigArtifactDigest, - refsManifestBLayerArtifactDigest, - refsManifestCLayerArtifactDigest, - } - for _, ref := range references { - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(ref)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusMethodNotAllowed), - Equal(http.StatusNotFound), - )) - } - }) - } - - g.Specify("References teardown", func() { - SkipIfDisabled(contentDiscovery) - RunOnlyIf(runContentDiscoverySetup) - - deleteReq := func(req *reggie.Request) { - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300), - ), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - } - - if deleteManifestBeforeBlobs { - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsIndexArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestAConfigArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestALayerArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[4].Digest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestBConfigArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestBLayerArtifactDigest)) - deleteReq(req) - } - - // Delete config blob created in setup - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(configs[4].Digest)) - 
deleteReq(req) - - // Delete reference blob created in setup - req = client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(testRefBlobADigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(testRefBlobBDigest)) - deleteReq(req) - - // Delete empty JSON blob created in setup - req = client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(emptyJSONDescriptor.Digest.String())) - deleteReq(req) - - if !deleteManifestBeforeBlobs { - // Delete manifest created in setup - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsIndexArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestAConfigArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestALayerArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[4].Digest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestBConfigArtifactDigest)) - deleteReq(req) - req = client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(refsManifestBLayerArtifactDigest)) - deleteReq(req) - } - }) - }) - }) -} diff --git a/conformance/04_management_test.go b/conformance/04_management_test.go deleted file mode 100644 index 00334b23..00000000 --- a/conformance/04_management_test.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - "encoding/json" - "net/http" - - "github.com/bloodorangeio/reggie" - g "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var test04ContentManagement = func() { - g.Context(titleContentManagement, func() { - - const defaultTagName = "tagtest0" - var tagToDelete string - var numTags int - var blobDeleteAllowed = true - - g.Context("Setup", func() { - g.Specify("Populate registry with test config blob", func() { - SkipIfDisabled(contentManagement) - RunOnlyIf(runContentManagementSetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetHeader("Content-Length", configs[3].ContentLength). - SetHeader("Content-Type", "application/octet-stream"). - SetQueryParam("digest", configs[3].Digest). - SetBody(configs[3].Content) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test layer", func() { - SkipIfDisabled(contentManagement) - RunOnlyIf(runContentManagementSetup) - req := client.NewRequest(reggie.POST, "/v2//blobs/uploads/") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - req = client.NewRequest(reggie.PUT, resp.GetRelativeLocation()). - SetQueryParam("digest", layerBlobDigest). - SetHeader("Content-Type", "application/octet-stream"). - SetHeader("Content-Length", layerBlobContentLength). 
- SetBody(layerBlobData) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Populate registry with test tag", func() { - SkipIfDisabled(contentManagement) - RunOnlyIf(runContentManagementSetup) - tagToDelete = defaultTagName - req := client.NewRequest(reggie.PUT, "/v2//manifests/", - reggie.WithReference(tagToDelete)). - SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). - SetBody(manifests[3].Content) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAll( - BeNumerically(">=", 200), - BeNumerically("<", 300))) - }) - - g.Specify("Check how many tags there are before anything gets deleted", func() { - SkipIfDisabled(contentManagement) - RunOnlyIf(runContentManagementSetup) - req := client.NewRequest(reggie.GET, "/v2//tags/list") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusOK)) - tagList := &TagList{} - jsonData := []byte(resp.String()) - err = json.Unmarshal(jsonData, tagList) - Expect(err).To(BeNil()) - numTags = len(tagList.Tags) - }) - }) - - g.Context("Manifest delete", func() { - g.Specify("DELETE request to manifest tag should return 202, unless tag deletion is disallowed (400/405)", func() { - SkipIfDisabled(contentManagement) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", - reggie.WithReference(tagToDelete)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusBadRequest), - Equal(http.StatusAccepted), - Equal(http.StatusMethodNotAllowed))) - if resp.StatusCode() == http.StatusBadRequest { - errorResponses, err := resp.Errors() - Expect(err).To(BeNil()) - Expect(errorResponses).ToNot(BeEmpty()) - Expect(errorResponses[0].Code).To(Equal(errorCodes[UNSUPPORTED])) - } - }) - - g.Specify("DELETE request to manifest (digest) should yield 
202 response unless already deleted", func() { - SkipIfDisabled(contentManagement) - req := client.NewRequest(reggie.DELETE, "/v2//manifests/", reggie.WithDigest(manifests[3].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - // In the case that the previous request was accepted, this may or may not fail (which is ok) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusAccepted), - Equal(http.StatusNotFound), - )) - }) - - g.Specify("GET request to deleted manifest URL should yield 404 response, unless delete is disallowed", func() { - SkipIfDisabled(contentManagement) - req := client.NewRequest(reggie.GET, "/v2//manifests/", reggie.WithDigest(manifests[3].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusNotFound), - Equal(http.StatusOK), - )) - }) - - g.Specify("GET request to tags list should reflect manifest deletion", func() { - SkipIfDisabled(contentManagement) - req := client.NewRequest(reggie.GET, "/v2//tags/list") - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusNotFound), - Equal(http.StatusOK), - )) - expectTags := numTags - 1 - if resp.StatusCode() == http.StatusOK { - tagList := &TagList{} - jsonData := []byte(resp.String()) - err = json.Unmarshal(jsonData, tagList) - Expect(err).To(BeNil()) - Expect(len(tagList.Tags)).To(Equal(expectTags)) - } - if resp.StatusCode() == http.StatusNotFound { - Expect(expectTags).To(Equal(0)) - } - }) - }) - - g.Context("Blob delete", func() { - g.Specify("DELETE request to blob URL should yield 202 response", func() { - SkipIfDisabled(contentManagement) - RunOnlyIf(runContentManagementSetup) - // config blob - req := client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(configs[3].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusAccepted), - 
Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - // layer blob - req = client.NewRequest(reggie.DELETE, "/v2//blobs/", reggie.WithDigest(layerBlobDigest)) - resp, err = client.Do(req) - Expect(err).To(BeNil()) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(SatisfyAny( - Equal(http.StatusAccepted), - Equal(http.StatusNotFound), - Equal(http.StatusMethodNotAllowed), - )) - if resp.StatusCode() == http.StatusMethodNotAllowed { - blobDeleteAllowed = false - } - }) - - g.Specify("GET request to deleted blob URL should yield 404 response", func() { - SkipIfDisabled(contentManagement) - RunOnlyIf(runContentManagementSetup) - RunOnlyIf(blobDeleteAllowed) - req := client.NewRequest(reggie.GET, "/v2//blobs/", reggie.WithDigest(configs[3].Digest)) - resp, err := client.Do(req) - Expect(err).To(BeNil()) - Expect(resp.StatusCode()).To(Equal(http.StatusNotFound)) - }) - }) - - g.Context("Teardown", func() { - // TODO: delete blob+tag? - // No teardown required at this time for content management tests - }) - }) -} diff --git a/conformance/Dockerfile b/conformance/Dockerfile index 8744d7dc..23bf1dfe 100644 --- a/conformance/Dockerfile +++ b/conformance/Dockerfile @@ -1,32 +1,10 @@ -# Copyright contributors to the Open Containers Distribution Specification -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+FROM golang:1.24-alpine AS build -# --- -# Stage 1: Install certs and build conformance binary -# --- -FROM docker.io/golang:1.19.7-alpine3.17 AS builder -ARG VERSION=unknown -ARG GO_PKG=github.com/opencontainers/distribution-spec/conformance -RUN apk --update add git make ca-certificates && mkdir -p /go/src/${GO_PKG} -WORKDIR /go/src/${GO_PKG} -ADD . . -RUN CGO_ENABLED=0 go test -c -o /conformance.test --ldflags="-X ${GO_PKG}.Version=${VERSION}" +COPY . . +RUN CGO_ENABLED=0 go build -o /usr/local/bin/conformance . +ENTRYPOINT [ "/usr/local/bin/conformance" ] -# --- -# Stage 2: Final image with nothing but certs & binary -# --- -FROM scratch AS final -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt -COPY --from=builder /conformance.test /conformance.test -ENTRYPOINT ["/conformance.test"] +FROM scratch +COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=build /usr/local/bin/conformance /conformance +ENTRYPOINT [ "/conformance" ] diff --git a/conformance/README.md b/conformance/README.md index 55036215..b5ff1c55 100644 --- a/conformance/README.md +++ b/conformance/README.md @@ -1,233 +1,185 @@ -## Conformance Tests +# OCI Distribution Spec Conformance Test + +The distribution-spec conformance test is used to verify the various HTTP endpoints on a registry generate the appropriate responses and handle different types of data. + +## Configuration + +The test is configured by either a yaml configuration file or environment variables. +When a setting is configured by multiple sources, the precedence from highest to lowest is the environment variable, then yaml configuration file, and lastly any legacy environment variables. + +Most registries can be tested by setting the registry, repository, and login credentials. +For APIs with a valid unsupported response code, attempts are made to track the missing feature without needing to manually disable the test. 
+ +### Environment Variables + +Environment variables can be used to set any configuration setting in the conformance test. +The available variables and their default stable values are listed here: + +```shell +# several variables are used to configure the overall conformance test process +export OCI_CONFIGURATION="oci-conformance.yaml" # see Yaml Configuration File below +export OCI_RESULTS_DIR="./results" # output of the conformance test will be written here, see Results below +export OCI_VERSION="1.1" # distribution-spec version to test against, this adjusts default values for the API tests, also accepts "stable" and "dev" +export OCI_LOG="warn" # adjust logging threshold: debug, info, warn, error (this does not affect the generated reports) + +# the registry settings typically need to be configured +export OCI_REGISTRY="localhost:5000" +export OCI_TLS="enabled" # enabled (https), insecure (self signed), or disabled (http) +export OCI_REPO1="conformance/repo1" +export OCI_REPO2="conformance/repo2" +export OCI_USERNAME= +export OCI_PASSWORD= +export OCI_CACHE_AUTH=true # whether to cache auth headers between compatible requests + +# API settings can be used to skip specific API endpoints +export OCI_API_PULL=true +export OCI_API_PUSH=true # to disable push requests, see the OCI_RO_DATA variables below +export OCI_API_BLOBS_ATOMIC=true # whether blob delete operations should be immediate +export OCI_API_BLOBS_DELETE=true +export OCI_API_BLOBS_DIGEST_HEADER=false # whether Docker-Content-Digest header is required +export OCI_API_BLOBS_MOUNT_ANONYMOUS=true # attempt to mount a blob without a source repository +export OCI_API_BLOBS_UPLOAD_CANCEL=false # cancel a running upload +export OCI_API_MANIFESTS_ATOMIC=true # whether manifest delete operations should be immediate +export OCI_API_MANIFESTS_DELETE=true +export OCI_API_MANIFESTS_DIGEST_HEADER=false # whether Docker-Content-Digest header is required +export OCI_API_MANIFESTS_TAG_PARAM=false # push manifest by 
digest with tags as parameters +export OCI_API_TAGS_ATOMIC=true # whether tag delete operations should be immediate +export OCI_API_TAGS_DELETE=true +export OCI_API_TAGS_LIST=true +export OCI_API_REFERRER=true + +# Data settings are used to generate a variety of OCI content +export OCI_DATA_IMAGE=true # note, this must be left enabled for any tests to run +export OCI_DATA_INDEX=true +export OCI_DATA_INDEX_LIST=true # an index containing a nested index +export OCI_DATA_SPARSE=false # manifest where some descriptors have not been pushed +export OCI_DATA_ARTIFACT=true # an OCI artifact packaged as an image with an artifactType +export OCI_DATA_SUBJECT=true # an OCI image with the subject field defined +export OCI_DATA_SUBJECT_MISSING=true # pushes content with a subject referencing a non-existent digest +export OCI_DATA_ARTIFACT_LIST=true # an OCI index with an artifactType +export OCI_DATA_SUBJECT_LIST=true # an OCI index with the subject field defined +export OCI_DATA_DATA_FIELD=true # descriptors with the data field populated +export OCI_DATA_NONDISTRIBUTABLE=true # an OCI image containing nondistributable layer references that have not been pushed +export OCI_DATA_CUSTOM_FIELDS=true # manifests and config json with additional fields +export OCI_DATA_NO_LAYERS=true # image manifest with an empty layer list +export OCI_DATA_EMPTY_BLOB=true # zero byte blob +export OCI_DATA_SHA512=true # content pushed using the sha512 digest algorithm + +# For testing read-only registries, images must be preloaded. +# OCI_API_PUSH=false must be set, and disabling DELETE APIs is recommended. +# All requests are performed against the OCI_REPO1 repository. 
+export OCI_RO_DATA_TAGS= # space separated list of tags +export OCI_RO_DATA_MANIFESTS= # space separated list of manifest digests +export OCI_RO_DATA_BLOBS= # space separated list of blob digests +export OCI_RO_DATA_REFERRERS= # space separated list of subject digests for the referrers API + +# other settings +export OCI_FILTER_TEST= # used to filter a specific branch of tests in, e.g. "OCI Conformance Test/sha256 blobs" +``` + +### Yaml Configuration File + +The conformance test will load `oci-conformance.yaml` by default, which can be configured with the `OCI_CONFIGURATION` environment variable. + +The default yaml configuration is shown below and matches the environment variables described above: -### How to Run - -#### Binary - -Requires Go 1.17+. - -In this directory, build the test binary: -``` -go test -c -``` - -This will produce an executable at `conformance.test`. - -Next, set environment variables with your registry details: -``` -# Registry details -export OCI_ROOT_URL="https://r.myreg.io" -export OCI_NAMESPACE="myorg/myrepo" -export OCI_CROSSMOUNT_NAMESPACE="myorg/other" -export OCI_USERNAME="myuser" -export OCI_PASSWORD="mypass" - -# Which workflows to run -export OCI_TEST_PULL=1 -export OCI_TEST_PUSH=1 -export OCI_TEST_CONTENT_DISCOVERY=1 -export OCI_TEST_CONTENT_MANAGEMENT=1 - -# Extra settings -export OCI_HIDE_SKIPPED_WORKFLOWS=0 -export OCI_DEBUG=0 -export OCI_DELETE_MANIFEST_BEFORE_BLOBS=0 # defaults to OCI_DELETE_MANIFEST_BEFORE_BLOBS=1 if not set -``` - -Lastly, run the tests: -``` -./conformance.test -``` - -Note: for some registries, you may need to create `OCI_NAMESPACE` ahead of time. - -This will produce `junit.xml` and `report.html` in the current directory with the results. 
To choose an alternative directory: - -``` -export OCI_REPORT_DIR=/alternative/directory -``` - -To disable writing of the result files: - -``` -export OCI_REPORT_DIR=none -``` - -#### Testing registry workflows - -The tests are broken down into 4 major categories: - -1. Pull - Highest priority - All OCI registries MUST support pulling OCI container -images. -2. Push - Registries need a way to get content to be pulled, but clients can/should -be more forgiving here. For example, if needing to fallback after an unsupported endpoint. -3. Content Discovery - Includes tag listing (and possibly search in the future). -4. Content Management - Lowest Priority - Includes tag, blob, and repo deletion. -(Note: Many registries may have other ways to accomplish this than the OCI API.) - -In addition, each category has its own setup and teardown processes where appropriate. - -##### Pull - -The Pull tests validate that content can be retrieved from a registry. - -These tests are run when the following is set in the environment: -``` -OCI_TEST_PULL=1 -``` - -Regardless of whether the Push tests are enabled, as part of setup for the Pull tests, -content will be uploaded to the registry. -If you wish to prevent this, you can set the following environment variables pointing -to content already present in the registry: - -``` -# Optional: set to prevent automatic setup -OCI_MANIFEST_DIGEST= -OCI_TAG_NAME= -OCI_BLOB_DIGEST= -``` - -##### Push - -The Push tests validate that content can be uploaded to a registry. - -To enable the Push tests, you must explicitly set the following in the environment: - -``` -# Required to enable -OCI_TEST_PUSH=1 -``` - -Some registries may require a workaround for Authorization during the push flow. 
To set your own scope, set the following in the environment: - -``` -# Set the auth scope -OCI_AUTH_SCOPE="repository:mystuff/myrepo:pull,push" -``` - -Most registries currently require at least one layer to be uploaded (and referenced in the appropriate section of the manifest) -before a manifest upload will succeed. By default, the push tests will attempt to push two manifests: one with a single layer, -and another with no layers. If the empty-layer test is causing a failure, it can be skipped by setting the following in the -environment: - -``` -# Enable layer upload -OCI_SKIP_EMPTY_LAYER_PUSH_TEST=1 -``` - -The test suite will need access to a second namespace. This namespace is used to check support for cross-repository mounting -of blobs, and may need to be configured on the server-side in advance. It is specified by setting the following in -the environment: - -``` -# The destination repository for cross-repository mounting: -OCI_CROSSMOUNT_NAMESPACE="myorg/other" -``` - -If you want to test the behaviour of automatic content discovery, you should set the `OCI_AUTOMATIC_CROSSMOUNT` variable. - -``` -# Do not test automatic cross mounting -unset OCI_AUTOMATIC_CROSSMOUNT - -# Test that automatic cross mounting is working as expected -OCI_AUTOMATIC_CROSSMOUNT=1 - -# Test that automatic cross mounting is disabled -OCI_AUTOMATIC_CROSSMOUNT=0 -``` - -##### Content Discovery - -The Content Discovery tests validate that the contents of a registry can be discovered. - -To enable the Content Discovery tests, you must explicitly set the following in the environment: - -``` -# Required to enable -OCI_TEST_CONTENT_DISCOVERY=1 -``` - -As part of setup of these tests, a manifest and associated tags will be pushed to the registry. 
-If you wish to prevent this, you can set the following environment variable pointing -to list of tags to be returned from `GET /v2//tags/list`: - -``` -# Optional: set to prevent automatic setup -OCI_TAG_LIST=,,, -``` - -##### Content Management - -The Content Management tests validate that the contents of a registry can be deleted or otherwise modified. - -To enable the Content Management tests, you must explicitly set the following in the environment: - -``` -# Required to enable -OCI_TEST_CONTENT_MANAGEMENT=1 -``` - -Note: The Content Management tests explicitly depend upon the Push and Content Discovery tests, as there is no -way to test content management without also supporting push and content discovery. - -#### HTML Report -By default, the HTML report will show tests from all workflows. To hide workflows that have been disabled from -the report, you must set the following in the environment: - -``` -# Required to hide disabled workflows -OCI_HIDE_SKIPPED_WORKFLOWS=1 -``` - -#### Teardown Order - -By default, the teardown phase of each test deletes blobs before manifests. Some registries require the opposite order, deleting manifests before blobs. In this case, you must set the following in the environment: - -``` -# Required to delete manifests before blobs -OCI_DELETE_MANIFEST_BEFORE_BLOBS=1 -``` - -#### Container Image - -You may use the [Dockerfile](./Dockerfile) located in this directory -to build a container image that contains the test binary. - -Example (using `docker`): -``` -# build the image, using git SHA as the version -docker build -t conformance:latest \ - --build-arg VERSION=$(git log --format="%H" -n 1) . 
- -# run the image -docker run --rm \ - -v $(pwd)/results:/results \ - -w /results \ - -e OCI_ROOT_URL="https://r.myreg.io" \ - -e OCI_NAMESPACE="myorg/myrepo" \ - -e OCI_USERNAME="myuser" \ - -e OCI_PASSWORD="mypass" \ - -e OCI_TEST_PULL=1 \ - -e OCI_TEST_PUSH=1 \ - -e OCI_TEST_CONTENT_DISCOVERY=1 \ - -e OCI_TEST_CONTENT_MANAGEMENT=1 \ - -e OCI_HIDE_SKIPPED_WORKFLOWS=0 \ - -e OCI_DEBUG=0 \ - -e OCI_DELETE_MANIFEST_BEFORE_BLOBS=0 \ +```yaml +resultsDir: ./results +version: "1.1" +registry: localhost:5000 +tls: enabled +repo1: conformance/repo1 +repo2: conformance/repo2 +username: "" +password: "" +cacheAuth: true +logging: warn +filterTest: "" +apis: + pull: true + push: true + blobs: + atomic: true + delete: true + digestHeader: false + mountAnonymous: true + uploadCancel: false + manifests: + atomic: true + delete: true + digestHeader: false + tagParam: false + tags: + atomic: true + delete: true + list: true + referrer: true +data: + image: true + index: true + indexList: true + sparse: false + artifact: true + subject: true + subjectMissing: true + artifactList: true + subjectList: true + dataField: true + nondistributable: true + customFields: true + noLayers: true + emptyBlob: true + sha512: true +roData: + tags: [] + manifests: [] + blobs: [] + referrers: [] +``` + +## Running the Test + +The test is available to be run with Go, Docker, or GitHub Actions. + +### Go + +The tests require Go 1.24 or greater. + +They can be run directly with: + +```shell +go run -buildvcs=true . +``` + +Or to compile and run separately: + +```shell +go build -o conformance . +./conformance +``` + +### Docker + +First configure the test with environment variables or a configuration file as described above. +Then build and run the conformance test using a command similar to below: + +```shell +docker build -t conformance . 
+docker run -it --rm --net=host \ + -u "$(id -u):$(id -g)" \ + -v "$(pwd)/results:/results" \ + -e OCI_REGISTRY -e OCI_TLS -e OCI_REPO1 -e OCI_REPO2 -e OCI_USERNAME -e OCI_PASSWORD -e OCI_VERSION \ conformance:latest ``` -This will create a local `results/` directory containing all of the test report files. +Additional environment variables can be specified as needed, or the `oci-conformance.yaml` file can be passed as a volume, mounted at `/oci-conformance.yaml` inside the container. -#### GitHub Action +### GitHub Actions -A GitHub Action is provided by this repo which you can use -as part of a GitHub-based CI pipeline. +A GitHub Action is provided by this repo which you can use as part of a GitHub-based CI pipeline. -The following example will build the binary off of the main branch, -run the tests, and upload `junit.xml` and `report.html` as build artifacts: +The following example will build the binary off of the main branch, run the tests, and upload `results.yaml`, `report.html`, and `junit.xml` as build artifacts. +Configure the environment variables, and add additional settings as needed from the variables defined in [environment variables](#environment-variables) above. 
```yaml # Place in repo at .github/workflows/oci-distribution-conformance.yml @@ -239,26 +191,27 @@ jobs: steps: - name: Run OCI Distribution Spec conformance tests uses: opencontainers/distribution-spec@main - # you can also run against a specific tag or commit instead - # uses: opencontainers/distribution-spec@v1.1.0 env: - OCI_ROOT_URL: https://myreg.io - OCI_NAMESPACE: mytestorg/mytestrepo + OCI_VERSION: "stable" + OCI_REGISTRY: "myreg.io" + OCI_TLS: "enabled" + OCI_REPO1: "conformance/repo1" + OCI_REPO2: "conformance/repo2" OCI_USERNAME: ${{ secrets.MY_REGISTRY_USERNAME }} OCI_PASSWORD: ${{ secrets.MY_REGISTRY_PASSWORD }} - OCI_TEST_PULL: 1 - OCI_TEST_PUSH: 1 - OCI_TEST_CONTENT_DISCOVERY: 1 - OCI_TEST_CONTENT_MANAGEMENT: 1 - OCI_HIDE_SKIPPED_WORKFLOWS: 0 - OCI_DEBUG: 0 - OCI_DELETE_MANIFEST_BEFORE_BLOBS: 0 ``` -You can also add a badge pointing to list of runs for this action using the following markdown: +You can also add a badge pointing to list of runs for this action using the following markdown (replacing `` and `` with your GitHub repo details): -``` +```markdown [![](https://github.com///workflows/oci-distribution-conformance/badge.svg)](https://github.com///actions?query=workflow%3Aoci-distribution-conformance) ``` -(replacing `` and `` with your GitHub repo details). +## Results + +A summary of the test is output to the screen along with any logging. +The results directory (`results` by default) is populated with the following files: + +- `result.yaml`: YAML parsable results of the API and data tests, including the redacted configuration. +- `report.html`: Full report of the test, including redacted output of each request and response. +- `junit.xml`: JUnit report. 
diff --git a/conformance/api.go b/conformance/api.go new file mode 100644 index 00000000..049e8214 --- /dev/null +++ b/conformance/api.go @@ -0,0 +1,1667 @@ +package main + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "maps" + "net/http" + "net/url" + "regexp" + "slices" + "strconv" + "strings" + + specs "github.com/opencontainers/distribution-spec/specs-go/v1" + digest "github.com/opencontainers/go-digest" + image "github.com/opencontainers/image-spec/specs-go/v1" +) + +var emptyDigest = digest.Canonical.FromBytes([]byte{}) + +type api struct { + client *http.Client + user, pass string + authCache map[string]string +} + +type apiOpt func(*api) + +func apiNew(client *http.Client, opts ...apiOpt) *api { + a := &api{ + client: client, + } + for _, opt := range opts { + opt(a) + } + return a +} + +func apiWithAuth(user, pass string, cacheAuth bool) apiOpt { + return func(a *api) { + a.user = user + a.pass = pass + if cacheAuth { + a.authCache = map[string]string{} + } + } +} + +type apiDoOpt struct { + reqFn func(*http.Request) error + respFn func(*http.Response) error + out io.Writer + flags map[string]bool +} + +func (a *api) Do(opts ...apiDoOpt) error { + errs := []error{} + reqFns := []func(*http.Request) error{} + respFns := []func(*http.Response) error{} + var out io.Writer + for _, opt := range opts { + if opt.reqFn != nil { + reqFns = append(reqFns, opt.reqFn) + } + if opt.respFn != nil { + respFns = append(respFns, opt.respFn) + } + if opt.out != nil { + out = opt.out + } + } + req, err := http.NewRequest(http.MethodGet, "", nil) + if err != nil { + return err + } + for _, reqFn := range reqFns { + err := reqFn(req) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) == 1 { + return errs[0] + } else if len(errs) > 1 { + return errors.Join(errs...) 
+ } + // add cached auth header if available + if a.authCache != nil { + err = a.addCachedAuth(req) + if err != nil { + return err + } + } + if out != nil { + out = redactWriter{w: out} + } + wt := &wrapTransport{out: out, orig: a.client.Transport} + if a.client.Transport == nil { + wt.orig = http.DefaultTransport + } + c := *a.client + c.Transport = wt + resp, err := c.Do(req) + if err != nil { + return err + } + // on auth failures, generate the auth header and retry + if resp.StatusCode == http.StatusUnauthorized { + auth, err := a.getAuthHeader(c, resp) + if err != nil { + errs = append(errs, err) + } + if resp.Body != nil { + _ = resp.Body.Close() + } + if err == nil && auth != "" { + req.Header.Set("Authorization", auth) + if req.GetBody != nil { + req.Body, err = req.GetBody() + if err != nil { + return fmt.Errorf("failed to reset body after auth request: %w", err) + } + } + resp, err = c.Do(req) + if err != nil { + return err + } + } + } + for _, respFn := range respFns { + err := respFn(resp) + if err != nil { + errs = append(errs, err) + } + } + if resp.Body != nil { + _ = resp.Body.Close() + } + if len(errs) == 1 { + return errs[0] + } else if len(errs) > 1 { + return errors.Join(errs...) + } + return nil +} + +func (a *api) GetFlags(opts ...apiDoOpt) map[string]bool { + ret := map[string]bool{} + for _, opt := range opts { + maps.Copy(ret, opt.flags) + } + return ret +} + +func (a *api) VerifyDigest(resp *http.Response, dig digest.Digest, opts ...apiDoOpt) error { + flags := a.GetFlags(opts...) 
+ digHeader := resp.Header.Get("Docker-Content-Digest") + if digHeader == "" && flags["RequireDigestHeader"] { + return fmt.Errorf("registry did not return a Docker-Content-Digest header") + } + if digHeader != "" && dig.String() != "" && digHeader != dig.String() { + return fmt.Errorf("Docker-Content-Digest header value expected %q, received %q", dig.String(), digHeader) + } + return nil +} + +func (a *api) BlobDelete(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/" + dig.String()) + if err != nil { + return err + } + var status int + err = a.Do( + apiWithMethod("DELETE"), + apiWithURL(u), + apiExpectStatus(http.StatusAccepted, http.StatusNotFound, http.StatusMethodNotAllowed), + apiReturnStatus(&status), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob delete failed: %w", err) + } + if status == http.StatusMethodNotAllowed { + return fmt.Errorf("registry returned status %d%.0w", status, errRegUnsupported) + } + return nil +} + +func (a *api) BlobGetReq(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/" + dig.String()) + if err != nil { + return err + } + err = a.Do( + apiWithMethod("GET"), + apiWithURL(u), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob get failed: %w", err) + } + return nil +} + +func (a *api) BlobGetExistsFull(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + resp := http.Response{Header: http.Header{}} + opts = append(opts, + apiExpectStatus(http.StatusOK), + apiReturnResponse(&resp), + ) + if val, ok := td.blobs[dig]; ok && (len(val) > 0 || dig == emptyDigest) { + opts = append(opts, apiExpectBody(val), apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val)))) + } + errs := []error{} + if err := a.BlobGetReq(registry, repo, dig, td, opts...); err != nil { + errs = append(errs, err) + } 
+ if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) +} + +func (a *api) BlobHeadReq(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/" + dig.String()) + if err != nil { + return err + } + err = a.Do( + apiWithMethod("HEAD"), + apiWithURL(u), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob head failed: %w", err) + } + return nil +} + +func (a *api) BlobHeadExists(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + resp := http.Response{Header: http.Header{}} + opts = append(opts, + apiExpectStatus(http.StatusOK), + apiExpectBody([]byte{}), + apiReturnResponse(&resp), + ) + if val, ok := td.blobs[dig]; ok && (len(val) > 0 || dig == emptyDigest) { + opts = append(opts, apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val)))) + } + errs := []error{} + if err := a.BlobHeadReq(registry, repo, dig, td, opts...); err != nil { + errs = append(errs, err) + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) 
+} + +func (a *api) BlobMount(registry, repo, source string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + bodyBytes, ok := td.blobs[dig] + if !ok { + return fmt.Errorf("BlobPostPut missing expected digest to send: %s%.0w", dig.String(), errAPITestError) + } + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + qa := u.Query() + qa.Set("mount", dig.String()) + if source != "" { + qa.Set("from", source) + } + u.RawQuery = qa.Encode() + // TODO: add digest algorithm if not sha256 + errs := []error{} + loc := "" + status := 0 + resp := http.Response{Header: http.Header{}} + err = a.Do( + apiWithMethod("POST"), + apiWithURL(u), + apiExpectStatus(http.StatusCreated, http.StatusAccepted), + apiReturnHeader("Location", &loc), + apiReturnStatus(&status), + apiReturnResponse(&resp), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + if loc == "" { + return fmt.Errorf("blob post did not return a location") + } + if status == http.StatusAccepted { + // fallback to post+put + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob post could not parse location header %q: %w", loc, err) + } + qa = u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + err = a.Do( + apiWithMethod("PUT"), + apiWithURL(u), + apiWithContentLength(int64(len(bodyBytes))), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithBody(bodyBytes), + apiExpectStatus(http.StatusCreated), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob put failed: %w", err) + } + errs = append(errs, fmt.Errorf("registry returned status %d, fell back to blob POST+PUT%.0w", status, errRegUnsupported)) + } else if status != http.StatusCreated { + return fmt.Errorf("blob mount returned status %d", status) + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + return err + } + if err := 
a.BlobVerifyLocation(u, loc, bodyBytes, opts...); err != nil { + return err + } + return errors.Join(errs...) +} + +func (a *api) BlobPatchChunked(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + flags := a.GetFlags(opts...) + bodyBytes, ok := td.blobs[dig] + if !ok { + return fmt.Errorf("BlobPatchChunked missing expected digest to send: %s%.0w", dig.String(), errAPITestError) + } + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + // TODO: add digest algorithm if not sha256 + minStr := "" + loc := "" + resp := http.Response{Header: http.Header{}} + err = a.Do( + apiWithMethod("POST"), + apiWithURL(u), + apiWithContentLength(0), + apiExpectStatus(http.StatusAccepted), + apiReturnHeader("OCI-Chunk-Min-Length", &minStr), + apiReturnHeader("Location", &loc), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + // calc chunk size to make 3 chunks, adjust to min chunk size if specified + chunkSize := len(bodyBytes)/3 + 1 + if minStr != "" { + min, err := strconv.Atoi(minStr) + if err != nil { + return fmt.Errorf("parsing OCI-Chunk-Min-Length size %q failed: %w", minStr, err) + } + if min > chunkSize { + chunkSize = min + } + } + if chunkSize < chunkMin { + chunkSize = chunkMin + } + if chunkSize > len(bodyBytes) { + chunkSize = len(bodyBytes) + } + lastByte := -1 + // loop over the number of chunks + for lastByte < len(bodyBytes)-1 { + if flags["OutOfOrderChunks"] { + if loc == "" { + return fmt.Errorf("blob request did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob request could not parse location header: %w", err) + } + // send an out of order chunk, skipping ahead or back to the beginning + badStart := lastByte + 1 + chunkSize + if badStart >= len(bodyBytes) { + badStart = 0 + } + badLastByte := min(badStart+chunkSize-1, len(bodyBytes)-1) + method := "PATCH" + if flags["PutLastChunk"] && 
badLastByte == len(bodyBytes)-1 { + method = "PUT" + qa := u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + } + err = a.Do( + apiWithMethod(method), + apiWithURL(u), + apiWithContentLength(int64(badLastByte-badStart+1)), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithHeaderAdd("Content-Range", fmt.Sprintf("%d-%d", badStart, badLastByte)), + apiWithBody(bodyBytes[badStart:badLastByte+1]), + apiExpectStatus(http.StatusRequestedRangeNotSatisfiable), + apiReturnHeader("Location", &loc), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob out of order chunk: %w", err) + } + // recover with a GET request to find the new location/range + rangeHeader := "" + err = a.Do( + apiWithMethod("GET"), + apiWithURL(u), + apiExpectStatus(http.StatusNoContent), + apiReturnHeader("Location", &loc), + apiReturnHeader("Range", &rangeHeader), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob chunked upload get request: %w", err) + } + rangeHeader, found := strings.CutPrefix(rangeHeader, "0-") + if !found { + return fmt.Errorf("content-range header is missing the 0- prefix: %q", rangeHeader) + } + rangeLastByte, err := strconv.Atoi(rangeHeader) + if err != nil { + return fmt.Errorf("content-range header could not be parsed: %q", rangeHeader) + } + if lastByte >= 0 && rangeLastByte != lastByte { + return fmt.Errorf("content-range unexpected, received %q, expected \"0-%d\"", rangeHeader, lastByte) + } + } + if loc == "" { + return fmt.Errorf("blob request did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob request could not parse location header: %w", err) + } + start := lastByte + 1 + lastByte = min(start+chunkSize-1, len(bodyBytes)-1) + var chunkOpts []apiDoOpt + if flags["PutLastChunk"] && lastByte == len(bodyBytes)-1 { + qa := u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + chunkOpts = append([]apiDoOpt{ + apiWithMethod("PUT"), + }, opts...) 
+ if flags["ExpectBadDigest"] { + chunkOpts = append(chunkOpts, + apiExpectStatus(http.StatusBadRequest), + ) + } else { + chunkOpts = append(chunkOpts, + apiExpectStatus(http.StatusCreated), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + ) + } + } else { + chunkOpts = append([]apiDoOpt{ + apiWithMethod("PATCH"), + apiExpectStatus(http.StatusAccepted), + apiReturnHeader("Location", &loc), + }, opts...) + } + err = a.Do( + apiWithURL(u), + apiWithContentLength(int64(lastByte-start+1)), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithHeaderAdd("Content-Range", fmt.Sprintf("%d-%d", start, lastByte)), + apiWithBody(bodyBytes[start:lastByte+1]), + apiWithAnd(chunkOpts), + ) + if err != nil { + return fmt.Errorf("blob patch failed: %w", err) + } + } + if !flags["PutLastChunk"] { + if loc == "" { + return fmt.Errorf("blob patch did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob patch could not parse location header: %w", err) + } + qa := u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + var putOpts []apiDoOpt + if flags["ExpectBadDigest"] { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusBadRequest), + }, opts...) + } else { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusCreated), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + }, opts...) 
+ } + err = a.Do( + apiWithMethod("PUT"), + apiWithURL(u), + apiWithContentLength(0), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithAnd(putOpts), + ) + if err != nil { + return fmt.Errorf("blob put failed: %w", err) + } + } + if flags["ExpectBadDigest"] { + return nil + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + return err + } + if err := a.BlobVerifyLocation(u, loc, bodyBytes, opts...); err != nil { + return err + } + return nil +} + +func (a *api) BlobPatchStream(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + flags := a.GetFlags(opts...) + bodyBytes, ok := td.blobs[dig] + if !ok { + return fmt.Errorf("BlobPatchStream missing expected digest to send: %s%.0w", dig.String(), errAPITestError) + } + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + // TODO: add digest algorithm if not sha256 + loc := "" + resp := http.Response{Header: http.Header{}} + err = a.Do( + apiWithMethod("POST"), + apiWithURL(u), + apiWithContentLength(0), + apiExpectStatus(http.StatusAccepted), + apiReturnHeader("Location", &loc), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + if loc == "" { + return fmt.Errorf("blob post did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob post could not parse location header: %w", err) + } + err = a.Do( + apiWithMethod("PATCH"), + apiWithURL(u), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithBody(bodyBytes), + apiExpectStatus(http.StatusAccepted), + apiReturnHeader("Location", &loc), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob patch failed: %w", err) + } + if loc == "" { + return fmt.Errorf("blob patch did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob patch could not parse location header: %w", err) + } + qa := u.Query() + qa.Set("digest", 
dig.String()) + u.RawQuery = qa.Encode() + var putOpts []apiDoOpt + if flags["ExpectBadDigest"] { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusBadRequest), + }, opts...) + } else { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusCreated), + apiExpectHeader("Location", ""), + }, opts...) + } + err = a.Do( + apiWithMethod("PUT"), + apiWithURL(u), + apiWithContentLength(0), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + apiWithAnd(putOpts), + ) + if err != nil { + return fmt.Errorf("blob put failed: %w", err) + } + if flags["ExpectBadDigest"] { + return nil + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + return err + } + if err := a.BlobVerifyLocation(u, loc, bodyBytes, opts...); err != nil { + return err + } + return nil +} + +func (a *api) BlobPostCancel(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + loc := "" + err = a.Do( + apiWithMethod("POST"), + apiWithURL(u), + apiExpectStatus(http.StatusAccepted), + apiReturnHeader("Location", &loc), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + if loc == "" { + return fmt.Errorf("blob post did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob post could not parse location header: %w", err) + } + err = a.Do( + apiWithMethod("DELETE"), + apiWithURL(u), + apiWithContentLength(0), + apiExpectStatus(http.StatusNoContent), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob cancel failed: %w", err) + } + return nil +} + +func (a *api) BlobPostOnly(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + flags := a.GetFlags(opts...) 
+ bodyBytes, ok := td.blobs[dig] + if !ok { + return fmt.Errorf("BlobPostOnly missing expected digest to send: %s%.0w", dig.String(), errAPITestError) + } + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + qa := u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + loc := "" + resp := http.Response{Header: http.Header{}} + var status int + var postOpts []apiDoOpt + if flags["ExpectBadDigest"] { + postOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusBadRequest, http.StatusAccepted), + }, opts...) + } else { + postOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusCreated, http.StatusAccepted), + apiExpectHeader("Location", ""), + }, opts...) + } + err = a.Do( + apiWithMethod("POST"), + apiWithURL(u), + apiWithContentLength(int64(len(bodyBytes))), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithBody(bodyBytes), + apiReturnStatus(&status), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + apiWithAnd(postOpts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + if status == http.StatusAccepted { + // fallback to a PUT request, but track the unsupported API + var putOpts []apiDoOpt + if flags["ExpectBadDigest"] { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusBadRequest), + }, opts...) + } else { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusCreated), + apiReturnHeader("Location", &loc), + }, opts...) 
+ } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob post could not parse location header: %w", err) + } + qa := u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + err = a.Do( + apiWithMethod("PUT"), + apiWithURL(u), + apiWithContentLength(int64(len(bodyBytes))), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithBody(bodyBytes), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + apiWithAnd(putOpts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + return fmt.Errorf("registry does not support content in the POST, fallback to PUT%.0w", errRegUnsupported) + } + if flags["ExpectBadDigest"] { + return nil + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + return err + } + if err := a.BlobVerifyLocation(u, loc, bodyBytes, opts...); err != nil { + return err + } + return nil +} + +func (a *api) BlobPostPut(registry, repo string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + flags := a.GetFlags(opts...) 
+ bodyBytes, ok := td.blobs[dig] + if !ok { + return fmt.Errorf("BlobPostPut missing expected digest to send: %s%.0w", dig.String(), errAPITestError) + } + u, err := url.Parse(registry + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + // TODO: add digest algorithm if not sha256 + loc := "" + resp := http.Response{Header: http.Header{}} + err = a.Do( + apiWithMethod("POST"), + apiWithURL(u), + apiExpectStatus(http.StatusAccepted), + apiReturnHeader("Location", &loc), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("blob post failed: %w", err) + } + if loc == "" { + return fmt.Errorf("blob post did not return a location") + } + u, err = u.Parse(loc) + if err != nil { + return fmt.Errorf("blob post could not parse location header: %w", err) + } + qa := u.Query() + qa.Set("digest", dig.String()) + u.RawQuery = qa.Encode() + var putOpts []apiDoOpt + if flags["ExpectBadDigest"] { + putOpts = append([]apiDoOpt{apiExpectStatus(http.StatusBadRequest)}, + opts...) + } else { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusCreated), + apiReturnHeader("Location", &loc), + apiReturnResponse(&resp), + }, opts...) 
+ } + err = a.Do( + apiWithMethod("PUT"), + apiWithURL(u), + apiWithContentLength(int64(len(bodyBytes))), + apiWithHeaderAdd("Content-Type", mtOctetStream), + apiWithBody(bodyBytes), + apiWithAnd(putOpts), + ) + if err != nil { + return fmt.Errorf("blob put failed: %w", err) + } + if flags["ExpectBadDigest"] { + return nil + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + return err + } + if err := a.BlobVerifyLocation(u, loc, bodyBytes, opts...); err != nil { + return err + } + return nil +} + +func (a *api) BlobVerifyLocation(u *url.URL, loc string, bodyBytes []byte, opts ...apiDoOpt) error { + if loc == "" { + return fmt.Errorf("location header missing") + } + u, err := u.Parse(loc) + if err != nil { + return fmt.Errorf("could not parse location header %q: %w", loc, err) + } + err = a.Do( + apiWithMethod("GET"), + apiWithURL(u), + apiExpectBody(bodyBytes), + apiExpectStatus(http.StatusOK), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("failed to verify returned location: %w", err) + } + return nil +} + +func (a *api) ManifestDelete(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref) + if err != nil { + return err + } + var status int + err = a.Do( + apiWithMethod("DELETE"), + apiWithURL(u), + apiExpectStatus(http.StatusAccepted, http.StatusNotFound, http.StatusBadRequest, http.StatusMethodNotAllowed), + apiReturnStatus(&status), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("manifest delete failed: %w", err) + } + if status == http.StatusBadRequest || status == http.StatusMethodNotAllowed { + return fmt.Errorf("registry returned status %d%.0w", status, errRegUnsupported) + } + return nil +} + +func (a *api) ManifestGetReq(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref) + if err != nil { + return err + } 
+ err = a.Do( + apiWithMethod("GET"), + apiWithURL(u), + apiWithHeaderAdd("Accept", mtOCIIndex), + apiWithHeaderAdd("Accept", mtOCIImage), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("manifest get failed: %w", err) + } + return nil +} + +func (a *api) ManifestGetExists(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + opts = append(opts, + apiExpectStatus(http.StatusOK), + ) + resp := http.Response{Header: http.Header{}} + if val, ok := td.manifests[dig]; ok && len(val) > 0 { + mediaType := detectMediaType(val) + opts = append(opts, + apiExpectBody(val), + apiExpectHeader("Content-Type", mediaType), + apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val))), + apiReturnResponse(&resp), + ) + } + errs := []error{} + if err := a.ManifestGetReq(registry, repo, ref, dig, td, opts...); err != nil { + errs = append(errs, err) + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) 
+} + +func (a *api) ManifestHeadReq(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref) + if err != nil { + return err + } + err = a.Do( + apiWithMethod("HEAD"), + apiWithURL(u), + apiWithHeaderAdd("Accept", mtOCIIndex), + apiWithHeaderAdd("Accept", mtOCIImage), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("manifest head failed: %w", err) + } + return nil +} + +func (a *api) ManifestHeadExists(registry, repo, ref string, dig digest.Digest, td *testData, opts ...apiDoOpt) error { + opts = append(opts, + apiExpectStatus(http.StatusOK), + apiExpectBody([]byte{}), + ) + resp := http.Response{Header: http.Header{}} + if val, ok := td.manifests[dig]; ok && len(val) > 0 { + mediaType := detectMediaType(val) + opts = append(opts, + apiExpectHeader("Content-Type", mediaType), + apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(val))), + apiReturnResponse(&resp), + ) + } + errs := []error{} + if err := a.ManifestHeadReq(registry, repo, ref, dig, td, opts...); err != nil { + errs = append(errs, err) + } + if err := a.VerifyDigest(&resp, dig, opts...); err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) +} + +func (a *api) ManifestPut(registry, repo, ref string, dig digest.Digest, td *testData, referrersEnabled bool, putOpts []apiDoOpt, opts ...apiDoOpt) error { + flags := a.GetFlags(opts...) + bodyBytes, ok := td.manifests[dig] + if !ok { + return fmt.Errorf("ManifestPut missing expected digest to send: %s%.0w", dig.String(), errAPITestError) + } + u, err := url.Parse(registry + "/v2/" + repo + "/manifests/" + ref) + if err != nil { + return err + } + mediaType := detectMediaType(bodyBytes) + resp := http.Response{Header: http.Header{}} + loc := "" + putOpts = append(putOpts, opts...) + if flags["ExpectBadDigest"] { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusBadRequest), + }, putOpts...) 
+ } else { + putOpts = append([]apiDoOpt{ + apiExpectStatus(http.StatusCreated), + apiReturnHeader("Location", &loc), + }, putOpts...) + } + if referrersEnabled { + // if the referrers API is being tested, verify OCI-Subject header is returned when appropriate + subj := detectSubject(td.manifests[dig]) + if subj != nil { + putOpts = append(putOpts, apiExpectHeader("OCI-Subject", subj.Digest.String())) + } + } + errs := []error{} + err = a.Do( + apiWithMethod("PUT"), + apiWithURL(u), + apiWithBody(bodyBytes), + apiWithHeaderAdd("Content-Type", mediaType), + apiReturnResponse(&resp), + apiWithAnd(putOpts), + ) + if err != nil { + errs = append(errs, fmt.Errorf("manifest put failed: %w", err)) + } + // do not validate response if a failure was expected + if flags["ExpectBadDigest"] { + return errors.Join(errs...) + } + // validate the digest header + digHeader := resp.Header.Get("Docker-Content-Digest") + if digHeader == "" && flags["RequestDigestHeader"] { + errs = append(errs, fmt.Errorf("registry did not return a Docker-Content-Digest header")) + } + if digHeader != "" && digHeader != dig.String() { + errs = append(errs, fmt.Errorf("Docker-Content-Digest header value expected %q, received %q", dig.String(), digHeader)) + } + // verify returned location + if loc == "" { + errs = append(errs, fmt.Errorf("blob put did not return a location")) + } else { + u, err = u.Parse(loc) + if err != nil { + errs = append(errs, fmt.Errorf("could not parse location header %q: %w", loc, err)) + } + if err == nil { + err = a.Do( + apiWithMethod("GET"), + apiWithURL(u), + apiWithHeaderAdd("Accept", mtOCIIndex), + apiWithHeaderAdd("Accept", mtOCIImage), + apiExpectStatus(http.StatusOK), + apiExpectHeader("Content-Type", mediaType), + apiExpectHeader("Content-Length", fmt.Sprintf("%d", len(bodyBytes))), + apiExpectBody(bodyBytes), + apiWithAnd(opts), + ) + if err != nil { + errs = append(errs, fmt.Errorf("failed to verify returned location: %w", err)) + } + } + } + return 
errors.Join(errs...) +} + +func (a *api) PingReq(registry string, opts ...apiDoOpt) error { + u, err := url.Parse(registry + "/v2/") + if err != nil { + return err + } + err = a.Do( + apiWithMethod("GET"), + apiWithURL(u), + apiWithAnd(opts), + ) + if err != nil { + return fmt.Errorf("registry ping failed: %w", err) + } + return nil +} + +func (a *api) ReferrersList(registry, repo string, dig digest.Digest, opts ...apiDoOpt) (image.Index, error) { + rl := image.Index{} + u, err := url.Parse(registry + "/v2/" + repo + "/referrers/" + dig.String()) + if err != nil { + return rl, err + } + err = a.Do( + apiWithURL(u), + apiExpectHeader("Content-Type", mtOCIIndex), + apiExpectStatus(http.StatusOK), + apiReturnJSONBody(&rl), + apiWithAnd(opts), + ) + // validate the response + if err == nil && (rl.MediaType != mtOCIIndex || rl.SchemaVersion != 2) { + err = fmt.Errorf("referrers response is not a valid OCI index (media type and schema version)%.0w", errAPITestFail) + } + return rl, err +} + +func (a *api) TagList(registry, repo string, opts ...apiDoOpt) (specs.TagList, error) { + tl := specs.TagList{} + u, err := url.Parse(registry + "/v2/" + repo + "/tags/list") + if err != nil { + return tl, err + } + err = a.Do( + apiWithURL(u), + apiWithOr( + []apiDoOpt{ + apiExpectStatus(http.StatusOK), + apiReturnJSONBody(&tl), + }, + []apiDoOpt{ + apiExpectStatus(http.StatusNotFound), + }, + ), + apiWithAnd(opts), + ) + return tl, err +} + +func apiWithAnd(opts []apiDoOpt) apiDoOpt { + ret := apiDoOpt{} + reqFns := [](func(*http.Request) error){} + respFns := [](func(*http.Response) error){} + for _, opt := range opts { + if opt.reqFn != nil { + reqFns = append(reqFns, opt.reqFn) + } + if opt.respFn != nil { + respFns = append(respFns, opt.respFn) + } + if opt.out != nil { + ret.out = opt.out + } + } + if len(reqFns) == 1 { + ret.reqFn = reqFns[0] + } else if len(reqFns) > 0 { + ret.reqFn = func(r *http.Request) error { + errs := []error{} + for _, fn := range reqFns { + err := 
fn(r) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) == 1 { + return errs[0] + } + return errors.Join(errs...) + } + } + if len(respFns) == 1 { + ret.respFn = respFns[0] + } else if len(respFns) > 0 { + ret.respFn = func(r *http.Response) error { + errs := []error{} + for _, fn := range respFns { + err := fn(r) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) == 1 { + return errs[0] + } + return errors.Join(errs...) + } + } + return ret +} + +// apiWithOr succeeds with any of the lists of respFn's are all successful. +// Note that reqFn entries are ignored. +func apiWithOr(optLists ...[]apiDoOpt) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + errsOr := []error{} + for _, opts := range optLists { + errsResp := []error{} + for _, opt := range opts { + if opt.respFn != nil { + err := opt.respFn(resp) + if err != nil { + errsResp = append(errsResp, err) + } + } + } + if len(errsResp) == 0 { + return nil + } + errsOr = append(errsOr, errors.Join(errsResp...)) + } + return fmt.Errorf("response did not match any condition: %w", errors.Join(errsOr...)) + }, + } +} + +func apiWithFlag(flag string) apiDoOpt { + return apiDoOpt{ + flags: map[string]bool{flag: true}, + } +} + +func apiWithMethod(method string) apiDoOpt { + return apiDoOpt{ + reqFn: func(req *http.Request) error { + req.Method = method + return nil + }, + } +} + +func apiWithURL(u *url.URL) apiDoOpt { + return apiDoOpt{ + reqFn: func(req *http.Request) error { + req.URL = u + return nil + }, + } +} + +func apiWithURLParam(key, val string) apiDoOpt { + return apiDoOpt{ + reqFn: func(req *http.Request) error { + if req.URL == nil { + return fmt.Errorf("URL must be set before adding a parameter") + } + params := req.URL.Query() + params.Add(key, val) + req.URL.RawQuery = params.Encode() + return nil + }, + } +} + +func apiWithContentLength(l int64) apiDoOpt { + return apiDoOpt{ + reqFn: func(req *http.Request) error { + req.ContentLength = l + if 
req.Header == nil { + req.Header = http.Header{} + } + req.Header.Add("Content-Length", fmt.Sprintf("%d", l)) + return nil + }, + } +} + +func apiWithHeaderAdd(key, value string) apiDoOpt { + return apiDoOpt{ + reqFn: func(req *http.Request) error { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Add(key, value) + return nil + }, + } +} + +func apiWithBody(body []byte) apiDoOpt { + return apiDoOpt{ + reqFn: func(req *http.Request) error { + if req.Body != nil { + _ = req.Body.Close() + } + req.Body = io.NopCloser(bytes.NewReader(body)) + req.GetBody = func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(body)), nil + } + return nil + }, + } +} + +func apiExpectBody(bodyExpect []byte) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + // read body and replace with a buf reader + bodyReceived, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read body: %w", err) + } + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(bodyReceived)) + if bytes.Equal(bodyExpect, bodyReceived) { + return nil + } + var bufExpect, bufReceived bytes.Buffer + err = printBody(bodyReceived, &bufReceived) + if err != nil { + return fmt.Errorf("failed to print received body: %w", err) + } + err = printBody(bodyExpect, &bufExpect) + if err != nil { + return fmt.Errorf("failed to print expected body: %w", err) + } + return fmt.Errorf("body contents mismatch, expected %s, received %s", strings.TrimSpace(bufExpect.String()), strings.TrimSpace(bufReceived.String())) + }, + } +} + +func apiExpectHeader(key, val string) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + cur := resp.Header.Values(key) + if val == "" { + if len(cur) == 0 { + return fmt.Errorf("missing header %q", key) + } + } else { + if !slices.Contains(cur, val) { + return fmt.Errorf("header value mismatch for %q, expected %q, received %q", key, val, cur) + } + } + return nil + }, + } +} + +func 
apiExpectStatus(statusCodes ...int) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + if slices.Contains(statusCodes, resp.StatusCode) { + return nil + } + return fmt.Errorf("unexpected status code, expected one of %v, received %d", statusCodes, resp.StatusCode) + }, + } +} + +func apiReturnHeader(key string, val *string) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + cur := resp.Header.Get(key) + if cur != "" { + *val = cur + } + return nil + }, + } +} + +func apiReturnJSONBody(data any) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + return json.NewDecoder(resp.Body).Decode(data) + }, + } +} + +func apiReturnResponse(ret *http.Response) apiDoOpt { + return apiDoOpt{ + respFn: func(r *http.Response) error { + *ret = *r + return nil + }, + } +} + +func apiReturnStatus(status *int) apiDoOpt { + return apiDoOpt{ + respFn: func(resp *http.Response) error { + *status = resp.StatusCode + return nil + }, + } +} + +func apiSaveOutput(out io.Writer) apiDoOpt { + return apiDoOpt{ + out: out, + } +} + +type authHeader struct { + Type string + Realm string + Service string + Scope string +} + +type authInfo struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` +} + +func (a *api) getAuthHeader(client http.Client, resp *http.Response) (string, error) { + cacheKey, err := authCacheKey(*resp.Request) + if err != nil { + return "", err + } + header := resp.Header.Get("WWW-Authenticate") + if resp.StatusCode != http.StatusUnauthorized || header == "" { + return "", fmt.Errorf("status code or header invalid for adding auth, status %d, header %s", resp.StatusCode, header) + } + parsed, err := parseAuthHeader(header) + if err != nil { + return "", err + } + if parsed.Type == "basic" { + auth := fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(a.user+":"+a.pass))) + if a.authCache != nil { + a.authCache[cacheKey] = auth + } + return auth, nil + } + if 
parsed.Type == "bearer" {
		// token auth: request a bearer token from the realm advertised in the
		// WWW-Authenticate challenge, passing service and (optional) scope.
		u, err := resp.Request.URL.Parse(parsed.Realm)
		if err != nil {
			return "", fmt.Errorf("failed to parse realm url: %w", err)
		}
		param := url.Values{}
		param.Set("service", parsed.Service)
		if parsed.Scope != "" {
			param.Set("scope", parsed.Scope)
		}
		u.RawQuery = param.Encode()
		req, err := http.NewRequest(http.MethodGet, u.String(), nil)
		if err != nil {
			// fix: message previously read "failed to created request"
			return "", fmt.Errorf("failed to create request: %w", err)
		}
		req.Header.Set("Accept", "application/json")
		req.SetBasicAuth(a.user, a.pass)
		authResp, err := client.Do(req)
		if err != nil {
			return "", fmt.Errorf("failed to send auth request: %w", err)
		}
		if authResp.Body != nil {
			defer func() { _ = authResp.Body.Close() }()
		}
		if authResp.StatusCode != http.StatusOK {
			return "", fmt.Errorf("invalid status on auth request: %d", authResp.StatusCode)
		}
		ai := authInfo{}
		if err := json.NewDecoder(authResp.Body).Decode(&ai); err != nil {
			return "", fmt.Errorf("failed to parse auth response: %w", err)
		}
		// some token servers return access_token instead of token; prefer it
		if ai.AccessToken != "" {
			ai.Token = ai.AccessToken
		}
		auth := fmt.Sprintf("Bearer %s", ai.Token)
		if a.authCache != nil {
			a.authCache[cacheKey] = auth
		}
		return auth, nil
	}
	return "", fmt.Errorf("failed to parse auth header, type=%s: %s", parsed.Type, header)
}

// addCachedAuth sets the Authorization header on req from the auth cache,
// keyed by host/repo/access as computed by authCacheKey. A nil cache or a
// cache miss is not an error; the request is simply sent unauthenticated.
func (a *api) addCachedAuth(req *http.Request) error {
	if a.authCache == nil {
		return nil
	}
	key, err := authCacheKey(*req)
	if err != nil {
		return err
	}
	if auth, ok := a.authCache[key]; ok {
		req.Header.Set("Authorization", auth)
	}
	return nil
}

var (
	// case-insensitive extraction of the challenge scheme (bearer or basic)
	authHeaderMatcher = regexp.MustCompile("(?i).*(bearer|basic).*")
	// fix: the parameter-name class was previously [a-zA-z], which also
	// matches the ASCII punctuation between 'Z' and 'a' ([ \ ] ^ _ `);
	// restricted to letters only.
	authParamsMatcher = regexp.MustCompile(`([a-zA-Z]+)="(.+?)"`)
)

// parseAuthHeader extracts the scheme and, for bearer challenges, the
// realm/service/scope parameters from a WWW-Authenticate header value.
// Unrecognized parameters are ignored; an unknown scheme is reported by the
// caller, not here (this function currently never returns an error).
func parseAuthHeader(header string) (authHeader, error) {
	// TODO: replace with a better parser, quotes should be optional, get character set from upstream http rfc
	var parsed authHeader
	parsed.Type = strings.ToLower(authHeaderMatcher.ReplaceAllString(header, "$1"))
	if parsed.Type == "bearer" {
		matches := authParamsMatcher.FindAllStringSubmatch(header, -1)
		for _, match := range matches {
			switch strings.ToLower(match[1]) {
			case "realm":
				parsed.Realm = match[2]
			case "service":
				parsed.Service = match[2]
			case "scope":
				parsed.Scope = match[2]
			}
		}
	}
	return parsed, nil
}

// reRepo extracts the repository name from a /v2/<repo>/<api>/... path; the
// repo pattern follows the distribution-spec repository name grammar.
var reRepo = regexp.MustCompile(`\/v2\/` +
	`([a-z0-9]+(?:(?:\.|_|__|-+)[a-z0-9]+)*(?:\/[a-z0-9]+(?:(?:\.|_|__|-+)[a-z0-9]+)*)*)` +
	`(\/(?:blobs|manifests|tags|referrers)\/(?:[^\/]+)|\/blobs\/uploads\/[^\/]*)`)

// authCacheKey builds the auth-cache key "host:repo:access" for a request.
// GET/HEAD map to "pull" and POST/PATCH/PUT to "push" so requests that share
// a token scope share a cache entry. The /v2/ ping has an empty repo.
// NOTE(review): DELETE is left unmapped and cached under "DELETE" — confirm
// whether it should share the "push" scope.
func authCacheKey(req http.Request) (string, error) {
	if req.URL == nil {
		return "", fmt.Errorf("cannot compute auth cache key, URL is nil")
	}
	// auth cache key is the host, repo, and method, joined with a colon separator
	host := req.URL.Host
	repo := ""
	if req.URL.Path != "/v2/" {
		pathMatch := reRepo.FindStringSubmatch(req.URL.Path)
		if len(pathMatch) < 2 {
			return "", fmt.Errorf("could not extract repo from path")
		}
		repo = pathMatch[1]
	}
	// compatible methods are merged
	method := req.Method
	switch method {
	case "GET", "HEAD":
		method = "pull"
	case "POST", "PATCH", "PUT":
		method = "push"
	}
	return strings.Join([]string{host, repo, method}, ":"), nil
}

// wrapTransport wraps an http.RoundTripper, dumping each request/response (or
// transport error) to out when out is non-nil.
type wrapTransport struct {
	out  io.Writer
	orig http.RoundTripper
}

// RoundTrip implements http.RoundTripper, logging the request before and the
// response (or error) after delegating to the wrapped transport.
func (wt *wrapTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if wt.out != nil {
		if err := printRequest(req, wt.out); err != nil {
			return nil, err
		}
	}
	resp, err := wt.orig.RoundTrip(req)
	if wt.out != nil {
		if err == nil {
			if err := printResponse(resp, wt.out); err != nil {
				return resp, err
			}
		}
		if err != nil {
			if _, err := fmt.Fprintf(wt.out, "%s\n~~~ Error ~~~\n%s\n", strings.Repeat("-", 80), err.Error()); err != nil {
				return resp, err
			}
		}
		if _, err := fmt.Fprintf(wt.out, "%s\n", strings.Repeat("=", 80)); err != nil {
			return resp, err
		}
	}
	return resp, err
}

type detectManifest struct {
MediaType string `json:"mediaType"` + Subject *image.Descriptor `json:"subject,omitempty"` +} + +func detectMediaType(body []byte) string { + det := detectManifest{ + MediaType: mtOCIImage, + } + _ = json.Unmarshal(body, &det) + return det.MediaType +} + +func detectSubject(body []byte) *image.Descriptor { + det := detectManifest{} + _ = json.Unmarshal(body, &det) + return det.Subject +} + +func cloneBodyReq(req *http.Request) ([]byte, error) { + if req.GetBody != nil { + rc, err := req.GetBody() + if err != nil { + return nil, err + } + out, err := io.ReadAll(rc) + _ = rc.Close() + return out, err + } + if req.Body == nil { + return []byte{}, nil + } + out, err := io.ReadAll(req.Body) + _ = req.Body.Close() + if err != nil { + return nil, err + } + // replace the body with a buffer so it can be reused + req.Body = io.NopCloser(bytes.NewReader(out)) + return out, err +} + +func cloneBodyResp(resp *http.Response) ([]byte, error) { + if resp.Body == nil { + return []byte{}, nil + } + out, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() + if err != nil { + return nil, err + } + // replace the body with a buffer so it can be reused + resp.Body = io.NopCloser(bytes.NewReader(out)) + return out, err +} + +func mediaTypeBase(orig string) string { + base, _, _ := strings.Cut(orig, ";") + return strings.TrimSpace(strings.ToLower(base)) +} + +func printBody(body []byte, w io.Writer) error { + if len(body) == 0 { + if _, err := fmt.Fprintf(w, "--- Empty body ---\n"); err != nil { + return err + } + return nil + } + ct := http.DetectContentType(body) + switch mediaTypeBase(ct) { + case "application/json", "text/plain": + if _, err := fmt.Fprintf(w, "%.*s\n", truncateBody, string(body)); err != nil { + return err + } + if len(body) > truncateBody { + if _, err := fmt.Fprintf(w, "--- Truncated body from %d to %d bytes ---\n", len(body), truncateBody); err != nil { + return err + } + } + default: + if _, err := fmt.Fprintf(w, "--- Output of %s not supported, %d bytes not 
shown ---\n", ct, len(body)); err != nil { + return err + } + } + return nil +} + +func printHeaders(headers http.Header, w io.Writer) error { + if _, err := fmt.Fprintf(w, "Headers:\n"); err != nil { + return err + } + for _, k := range slices.Sorted(maps.Keys(headers)) { + if _, err := fmt.Fprintf(w, " %25s: %v\n", k, headers[k]); err != nil { + return err + } + } + return nil +} + +func printRequest(req *http.Request, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s\n~~~ REQUEST ~~~\n", strings.Repeat("=", 80)); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "Method: %s\nURL: %s\n", req.Method, req.URL.String()); err != nil { + return err + } + if err := printHeaders(req.Header, w); err != nil { + return err + } + body, err := cloneBodyReq(req) + if err != nil { + return err + } + if err := printBody(body, w); err != nil { + return err + } + + return nil +} + +func printResponse(resp *http.Response, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s\n~~~ RESPONSE ~~~\n", strings.Repeat("-", 80)); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "Status: %d\n", resp.StatusCode); err != nil { + return err + } + if err := printHeaders(resp.Header, w); err != nil { + return err + } + body, err := cloneBodyResp(resp) + if err != nil { + return err + } + if err := printBody(body, w); err != nil { + return err + } + + return nil +} + +type redactWriter struct { + w io.Writer +} + +var ( + redactRegexp = regexp.MustCompile(`(?i)("?\w*(?:authorization|token|state)\w*"?(?:=|:)\s*(?:\[)?\s*"?\s*(?:(?:bearer|basic)? 
)?)[^\s?&"\]]*`) + redactReplace = []byte("$1*****") +) + +func (rw redactWriter) Write(p []byte) (int, error) { + pRedact := redactRegexp.ReplaceAll(p, redactReplace) + n, err := rw.w.Write(pRedact) + if err != nil || n != len(pRedact) { + return 0, err + } + return len(p), nil +} diff --git a/conformance/config.go b/conformance/config.go new file mode 100644 index 00000000..28946f60 --- /dev/null +++ b/conformance/config.go @@ -0,0 +1,688 @@ +package main + +import ( + "fmt" + "io" + "os" + "reflect" + "runtime/debug" + "strconv" + "strings" + + "github.com/goccy/go-yaml" +) + +const ( + confGoTag = "conformance" + envOCIConf = "OCI" + envOCIConfFile = "OCI_CONFIGURATION" + envOCIVersion = "OCI_VERSION" + defaultOCIConf = "oci-conformance.yaml" + chunkMin = 1024 + truncateBody = 4096 + biVCSCommit = "vcs.revision" +) + +var Version = "unknown" + +type config struct { + Registry string `conformance:"REGISTRY" yaml:"registry"` // hostname:port of registry server + TLS tls `conformance:"TLS" yaml:"tls"` // tls configuration for communicating with the registry + Repo1 string `conformance:"REPO1" yaml:"repo1"` // first repository for pushing content + Repo2 string `conformance:"REPO2" yaml:"repo2"` // second repository for pushing content + LoginUser string `conformance:"USERNAME" yaml:"username"` // username for login, leave blank for anonymous + LoginPass string `conformance:"PASSWORD" yaml:"password"` // password for login, leave blank for anonymous + CacheAuth bool `conformance:"CACHE_AUTH" yaml:"cacheAuth"` // whether to allow auth to be cached and reused between requests + LogLevel string `conformance:"LOG" yaml:"logging"` // slog logging level, defaults to "warn" + LogWriter io.Writer `yaml:"-"` // writer used for logging, defaults to os.Stderr + FilterTest string `conformance:"FILTER_TEST" yaml:"filterTest,omitempty"` // only run tests with a given name prefix + APIs configAPI `conformance:"API" yaml:"apis"` // API tests to run + Data configData 
`conformance:"DATA" yaml:"data"` // data types to test + ROData configROData `conformance:"RO_DATA" yaml:"roData"` // read-only data for registries that do not support push methods + ResultsDir string `conformance:"RESULTS_DIR" yaml:"resultsDir"` // directory to write results + Version string `conformance:"VERSION" yaml:"version"` // spec version used to set test defaults + schemeReg string `yaml:"-"` // base for url to access the registry + Commit string `yaml:"commit"` // injected git commit hash from runtime + Legacy bool `yaml:"legacy,omitempty"` // injected to indicate that conformance was run with "go test" +} + +type tls int + +const ( + tlsEnabled tls = iota + tlsInsecure + tlsDisabled +) + +type configAPI struct { + Ping bool `conformance:"PING" yaml:"ping"` + Pull bool `conformance:"PULL" yaml:"pull"` + Push bool `conformance:"PUSH" yaml:"push"` + Blobs configBlobs `conformance:"BLOBS" yaml:"blobs"` + Manifests configManifests `conformance:"MANIFESTS" yaml:"manifests"` + Tags configTags `conformance:"TAGS" yaml:"tags"` + Referrer bool `conformance:"REFERRER" yaml:"referrer"` +} + +type configBlobs struct { + Atomic bool `conformance:"ATOMIC" yaml:"atomic"` + Delete bool `conformance:"DELETE" yaml:"delete"` + DigestHeader bool `conformance:"DIGEST_HEADER" yaml:"digestHeader"` + MountAnonymous bool `conformance:"MOUNT_ANONYMOUS" yaml:"mountAnonymous"` + UploadCancel bool `conformance:"UPLOAD_CANCEL" yaml:"uploadCancel"` +} + +type configManifests struct { + Atomic bool `conformance:"ATOMIC" yaml:"atomic"` + Delete bool `conformance:"DELETE" yaml:"delete"` + DigestHeader bool `conformance:"DIGEST_HEADER" yaml:"digestHeader"` + TagParam bool `conformance:"TAG_PARAM" yaml:"tagParam"` +} + +type configTags struct { + Atomic bool `conformance:"ATOMIC" yaml:"atomic"` + Delete bool `conformance:"DELETE" yaml:"delete"` + List bool `conformance:"LIST" yaml:"list"` +} + +type configData struct { + Image bool `conformance:"IMAGE" yaml:"image"` // standard OCI image + 
Index bool `conformance:"INDEX" yaml:"index"` // multi-platform manifest + IndexList bool `conformance:"INDEX_LIST" yaml:"indexList"` // nested index + Sparse bool `conformance:"SPARSE" yaml:"sparse"` // manifest where some descriptors have not been pushed + Artifact bool `conformance:"ARTIFACT" yaml:"artifact"` // OCI artifact + Subject bool `conformance:"SUBJECT" yaml:"subject"` // artifact with the subject defined + SubjectMissing bool `conformance:"SUBJECT_MISSING" yaml:"subjectMissing"` // artifact with a missing subject + ArtifactList bool `conformance:"ARTIFACT_LIST" yaml:"artifactList"` // index of artifacts + SubjectList bool `conformance:"SUBJECT_LIST" yaml:"subjectList"` // index with a subject + DataField bool `conformance:"DATA_FIELD" yaml:"dataField"` // data field in descriptor + Nondistributable bool `conformance:"NONDISTRIBUTABLE" yaml:"nondistributable"` // nondistributable image, deprecated in image-spec 1.1 + CustomFields bool `conformance:"CUSTOM_FIELDS" yaml:"customFields"` // fields added beyond the OCI spec + NoLayers bool `conformance:"NO_LAYERS" yaml:"noLayers"` // image manifest with an empty layer list + EmptyBlob bool `conformance:"EMPTY_BLOB" yaml:"emptyBlob"` // a zero byte blob + Sha512 bool `conformance:"SHA512" yaml:"sha512"` // sha512 digest algorithm +} + +type configROData struct { + Tags []string `conformance:"TAGS" yaml:"tags"` // tag names + Manifests []string `conformance:"MANIFESTS" yaml:"manifests"` // manifest digests + Blobs []string `conformance:"BLOBS" yaml:"blobs"` // blob digests + Referrers []string `conformance:"REFERRERS" yaml:"referrers"` // referrers subject digests +} + +func configLoad() (config, error) { + // read config from yaml file if available + loadFile := "" + configFile := []byte{} + if filename, ok := os.LookupEnv(envOCIConfFile); ok { + loadFile = filename + } else if fi, err := os.Stat(defaultOCIConf); err == nil && !fi.IsDir() { + loadFile = defaultOCIConf + } + if loadFile != "" { + fh, err := 
os.Open(loadFile) + if err != nil { + return config{}, err + } + configFile, err = io.ReadAll(fh) + _ = fh.Close() + if err != nil { + return config{}, err + } + } + // extract the version from the config file or env variable + configVersion := "" + if len(configFile) > 0 { + verStruct := struct { + Version string `yaml:"version"` + }{} + err := yaml.Unmarshal(configFile, &verStruct) + if err != nil { + return config{}, err + } + configVersion = verStruct.Version + } + configVersionEnv := os.Getenv(envOCIVersion) + if configVersionEnv != "" { + configVersion = configVersionEnv + } + // initialize config with default values based on spec version + c := config{ + Registry: "localhost:5000", + Repo1: "conformance/repo1", + Repo2: "conformance/repo2", + CacheAuth: true, + LogLevel: "warn", + LogWriter: os.Stderr, + ResultsDir: "./results", + APIs: configAPI{ + Ping: true, + Pull: true, + Push: true, + Blobs: configBlobs{ + Atomic: true, + Delete: true, + DigestHeader: false, + MountAnonymous: true, + UploadCancel: false, + }, + Manifests: configManifests{ + Atomic: true, + Delete: true, + DigestHeader: false, + TagParam: false, + }, + Tags: configTags{ + Atomic: true, + Delete: true, + List: true, + }, + Referrer: true, + }, + Data: configData{ + Image: true, + Index: true, + IndexList: true, + Sparse: false, + Artifact: true, + Subject: true, + SubjectMissing: true, + ArtifactList: true, + SubjectList: true, + DataField: true, + Nondistributable: true, + CustomFields: true, + NoLayers: true, + EmptyBlob: true, + Sha512: true, + }, + } + switch configVersion { + case "dev", "1.1+dev": + c.APIs.Blobs.UploadCancel = true + c.APIs.Blobs.DigestHeader = true + c.APIs.Manifests.DigestHeader = true + c.APIs.Manifests.TagParam = true + c.Version = "1.1+dev" + case "", "stable", "1.1": + c.Version = "1.1" + case "1.0": + c.APIs.Blobs.MountAnonymous = false + c.APIs.Referrer = false + c.Version = "1.0" + default: + return config{}, fmt.Errorf("unsupported config version %s", 
configVersion) + } + // process legacy variables but warn user when they are seen + err := confLegacyEnv(&c) + if err != nil { + return c, err + } + // read config from yaml file if available + if len(configFile) > 0 { + err := yaml.Unmarshal(configFile, &c) + if err != nil { + return c, err + } + } + // parse config from environment variables, overriding any yaml settings + err = confFromEnv(envOCIConf, confGoTag, reflect.ValueOf(&c)) + if err != nil { + return c, err + } + // setup computed values + scheme := "https" + if c.TLS == tlsDisabled { + scheme = "http" + } + c.schemeReg = fmt.Sprintf("%s://%s", scheme, c.Registry) + // load the commit from the build info + if bi, ok := debug.ReadBuildInfo(); ok && bi != nil { + for _, setting := range bi.Settings { + if setting.Key == biVCSCommit { + c.Commit = setting.Value + break + } + } + } + // fall back to version injected from the makefile ldflags parameter + if c.Commit == "" { + c.Commit = Version + } + return c, nil +} + +func (t tls) MarshalText() ([]byte, error) { + var s string + switch t { + default: + s = "enabled" // by default, TLS is enabled + case tlsInsecure: + s = "insecure" + case tlsDisabled: + s = "disabled" + } + return []byte(s), nil +} + +func (t *tls) UnmarshalText(b []byte) error { + switch strings.ToLower(string(b)) { + default: + *t = tlsEnabled + case "insecure": + *t = tlsInsecure + case "disabled": + *t = tlsDisabled + } + return nil +} + +func confFromEnv(env, tag string, vp reflect.Value) error { + vpt := vp.Type() + if vpt.Kind() != reflect.Pointer { + return fmt.Errorf("confFromEnv requires a pointer input") + } + if vp.IsZero() { + return nil // nil pointer + } + v := reflect.Indirect(vp) + if v.Kind() == reflect.Pointer { + // pointer to a pointer, recurse + return confFromEnv(env, tag, v) + } + if v.Kind() == reflect.Struct { + // expand each field, adding to prefix and recursing on pointer to the entry + for i := 0; i < v.NumField(); i++ { + vtf := v.Type().Field(i) + tagVal := 
vtf.Tag.Get(tag) + if tagVal != "" { + if !v.Field(i).CanAddr() { + return fmt.Errorf("unable to generate address on %s", v.Field(i).Type().Name()) + } + tagEnv := fmt.Sprintf("%s_%s", env, tagVal) + if err := confFromEnv(tagEnv, tag, v.Field(i).Addr()); err != nil { + return fmt.Errorf("field failed %q: %w", v.Field(i).Type().Name(), err) + } + } + } + return nil + } + + // get the value from the environment + val := os.Getenv(env) + if val == "" { + // skip undefined env variables + return nil + } + + // try to unmarshal with a built in method + if mt, ok := vp.Interface().(interface{ UnmarshalText(b []byte) error }); ok { + if err := mt.UnmarshalText([]byte(val)); err != nil { + return fmt.Errorf("failed to unmarshal %q: %w", env, err) + } + return nil + } + + // fall back to extracting by the kind + switch v.Kind() { + case reflect.String: + v.SetString(val) + case reflect.Bool: + b, err := strconv.ParseBool(val) + if err != nil { + return fmt.Errorf("failed to parse bool value from environment %s=%s", env, val) + } + v.SetBool(b) + case reflect.Slice: + switch v.Type().Elem().Kind() { + case reflect.String: + valSlice := strings.Split(val, " ") + newSlice := reflect.MakeSlice(v.Type(), len(valSlice), len(valSlice)) + for i, cur := range valSlice { + newSlice.Index(i).SetString(cur) + } + v.Set(newSlice) + default: + return fmt.Errorf("unsupported slice of kind: %s", v.Type().Elem().Kind()) + } + default: + // unhandled type + return fmt.Errorf("unsupported kind: %s", v.Kind()) + } + return nil +} + +func confLegacyEnv(c *config) error { + // Note: some legacy variables are not converted: + // export OCI_HIDE_SKIPPED_WORKFLOWS=0 + // export OCI_DELETE_MANIFEST_BEFORE_BLOBS=0 + if v := os.Getenv("OCI_ROOT_URL"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_ROOT_URL is deprecated, use OCI_REGISTRY and OCI_TLS instead\n") + v := strings.TrimSuffix(strings.TrimSpace(strings.ToLower(v)), "/") + vSplit := strings.SplitN(v, "://", 2) + scheme := "https" + reg := v 
+ if len(vSplit) == 2 { + scheme = vSplit[0] + reg = vSplit[1] + } + switch scheme { + case "http": + c.TLS = tlsDisabled + default: + c.TLS = tlsEnabled + } + c.Registry = reg + } + if v := os.Getenv("OCI_NAMESPACE"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_NAMESPACE is deprecated, use OCI_REPO1 instead\n") + c.Repo1 = strings.TrimSuffix(strings.TrimSpace(strings.ToLower(v)), "/") + } + if v := os.Getenv("OCI_CROSSMOUNT_NAMESPACE"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_CROSSMOUNT_NAMESPACE is deprecated, use OCI_REPO2 instead\n") + c.Repo2 = strings.TrimSuffix(strings.TrimSpace(strings.ToLower(v)), "/") + } + if v := os.Getenv("OCI_TEST_PULL"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_PULL is deprecated, use OCI_API_PULL instead\n") + b, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("failed ot parse bool value from environment %s=%s", "OCI_TEST_PULL", v) + } + c.APIs.Pull = b + } + if v := os.Getenv("OCI_TEST_PUSH"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_PUSH is deprecated, use OCI_API_PUSH instead\n") + b, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("failed ot parse bool value from environment %s=%s", "OCI_TEST_PUSH", v) + } + c.APIs.Push = b + } + if v := os.Getenv("OCI_TEST_CONTENT_DISCOVERY"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_CONTENT_DISCOVERY is deprecated, use OCI_API_TAG_LIST and OCI_API_REFERRER instead\n") + b, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("failed ot parse bool value from environment %s=%s", "OCI_TEST_CONTENT_DISCOVERY", v) + } + c.APIs.Tags.List = b + c.APIs.Referrer = b + } + if v := os.Getenv("OCI_TEST_CONTENT_MANAGEMENT"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_CONTENT_MANAGEMENT is deprecated, use OCI_API_TAG_DELETE, OCI_API_MANIFEST_DELETE, and OCI_API_BLOB_DELETE instead\n") + b, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("failed ot parse bool value from environment %s=%s", 
"OCI_TEST_CONTENT_MANAGEMENT", v) + } + c.APIs.Tags.Delete = b + c.APIs.Manifests.Delete = b + c.APIs.Blobs.Delete = b + } + if v := os.Getenv("OCI_DEBUG"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_DEBUG is deprecated, use OCI_LOG=debug instead\n") + b, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("failed ot parse bool value from environment %s=%s", "OCI_DEBUG", v) + } + if b { + c.LogLevel = "debug" + } + } + if v := os.Getenv("OCI_TAG_NAME"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_TAG_NAME is deprecated, use OCI_RO_DATA_TAGS instead\n") + c.ROData.Tags = append(c.ROData.Tags, strings.Split(v, " ")...) + } + if v := os.Getenv("OCI_MANIFEST_DIGEST"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_MANIFEST_DIGEST is deprecated, use OCI_RO_DATA_MANIFESTS instead\n") + c.ROData.Manifests = append(c.ROData.Manifests, strings.Split(v, " ")...) + } + if v := os.Getenv("OCI_BLOB_DIGEST"); v != "" { + fmt.Fprintf(os.Stderr, "WARNING: OCI_BLOB_DIGEST is deprecated, use OCI_RO_DATA_BLOBS instead\n") + c.ROData.Blobs = append(c.ROData.Blobs, strings.Split(v, " ")...) + } + return nil +} + +func (c config) Redact() config { + // censor credentials + if c.LoginUser != "" { + c.LoginUser = "***" + } + if c.LoginPass != "" { + c.LoginPass = "***" + } + return c +} + +func (c config) Report() string { + b, err := yaml.Marshal(c.Redact()) + if err != nil { + return fmt.Sprintf("failed to marshal config: %v", err) + } + return string(b) +} + +var confHTMLTemplates = map[string]string{ + "report": ` + + OCI Distribution Conformance Tests + + + +

OCI Distribution Conformance Tests

+ {{- if .Config.Legacy }} +

WARNING: Running conformance with "go test" is deprecated, please update to "go build"

+ {{- end }} + {{ template "summary" . }} +
+ {{ template "results" .Results }} +
+ +`, + "summary": ` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Summary +
+ {{- if gt .NumPassed 0 -}} + + {{- if .AllPassed -}}All {{ end -}}{{ .NumPassed }} passed + {{- end -}} + {{- if gt .NumFailed 0 -}} + + {{- if .AllFailed -}}All {{ end -}}{{ .NumFailed }} failed + {{- end -}} + {{- if gt .NumSkipped 0 -}} + + {{- if .AllSkipped -}}All {{ end -}}{{ .NumSkipped }} skipped + {{- end -}} +
+
+
+
+
+
+
Start Time{{ .StartTimeString }}
End Time{{ .EndTimeString }}
Time Elapsed{{ .RunTime }}
Tested Spec{{ .Config.Version }}
Conformance Commit{{ .Config.Commit }}
Configuration
{{ .Config.Report }}
`, + "results": ` +
+

{{ .Name }}

+ {{- if ne .Output.String "" }} +

Output:

+
{{- html .Output.String -}}
+ {{- end }} + {{- if ne ( len .Errs ) 0 }} +

Errors:

+ {{- range $err := .Errs }} +
{{ html $err.Error }}
+ {{- end }} + {{- end }} + {{- range $result := .Children }} + {{template "results" $result }} + {{- end }} +
+
+ `, + "status-color": ` + {{- if eq .String "Pass" }}green + {{- else if eq .String "FAIL" }}red + {{- else if eq .String "Error" }}red + {{- else if eq .String "Skip" }}grey + {{- else if eq .String "Disabled" }}grey + {{- end }}`, +} diff --git a/conformance/errors.go b/conformance/errors.go new file mode 100644 index 00000000..3cb0b4b6 --- /dev/null +++ b/conformance/errors.go @@ -0,0 +1,11 @@ +package main + +import "errors" + +var ( + errAPITestDisabled = errors.New("API is disabled by user configuration") + errAPITestSkip = errors.New("API test was skipped") + errAPITestError = errors.New("API test encountered an internal error") + errAPITestFail = errors.New("API test with a known failure") + errRegUnsupported = errors.New("registry does not support the requested API") +) diff --git a/conformance/go.mod b/conformance/go.mod index 7ea42067..e51f4e45 100644 --- a/conformance/go.mod +++ b/conformance/go.mod @@ -1,26 +1,10 @@ module github.com/opencontainers/distribution-spec/conformance -go 1.20 +go 1.24.0 require ( - github.com/bloodorangeio/reggie v0.6.1 - github.com/google/uuid v1.3.0 - github.com/onsi/ginkgo/v2 v2.11.0 - github.com/onsi/gomega v1.27.8 + github.com/goccy/go-yaml v1.18.0 + github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240926185104-8376368dd8aa github.com/opencontainers/go-digest v1.0.0 -) - -require ( - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-resty/resty/v2 v2.7.0 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - 
gopkg.in/yaml.v3 v3.0.1 // indirect + github.com/opencontainers/image-spec v1.1.1 ) diff --git a/conformance/go.sum b/conformance/go.sum index 105ad8c6..696b89f8 100644 --- a/conformance/go.sum +++ b/conformance/go.sum @@ -1,53 +1,8 @@ -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/bloodorangeio/reggie v0.6.1 h1:rSpfPN8oU9kflRI7aQVjImjhY5meRsXDIXnJQrr11zs= -github.com/bloodorangeio/reggie v0.6.1/go.mod h1:Jkvg7UBdlXVNOlvXU6hgysdtG1XNVCB3B4/k7+PtlfM= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= -github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= 
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= -github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240926185104-8376368dd8aa h1:CUcrPKAP0lYp3xAHghAYBR6rFI+BaW0nWAvhHwjLQYM= +github.com/opencontainers/distribution-spec/specs-go v0.0.0-20240926185104-8376368dd8aa/go.mod h1:Va0IMqkjv62YSEytL4sgxrkiD9IzU0T0bX/ZZEtMnSQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod 
h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= diff --git a/conformance/image.go b/conformance/image.go deleted file mode 100644 index 23c8fcc2..00000000 --- a/conformance/image.go +++ /dev/null @@ 
-1,157 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - digest "github.com/opencontainers/go-digest" -) - -// These types are copied from github.com/opencontainers/image-spec/specs-go/v1 -// Modifications have been made to remove fields that aren't used in these -// conformance tests, and to add new unspecified fields, to test registry -// conformance in handling unknown fields. - -// manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. -type manifest struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // Config references a configuration object for a container, by digest. - // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config descriptor `json:"config"` - - // Layers is an indexed list of layers referenced by the manifest. 
- Layers []descriptor `json:"layers"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} - -// descriptor describes the disposition of targeted content. -// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype -// when marshalled to JSON. -type descriptor struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // Digest is the digest of the targeted content. - Digest digest.Digest `json:"digest"` - - // Size specifies the size in bytes of the blob. - Size int64 `json:"size"` - - // URLs specifies a list of URLs from which this object MAY be downloaded - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Data specifies the data of the object described by the descriptor. - Data []byte `json:"data,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // - // This should only be used when referring to a manifest. - Platform *platform `json:"platform,omitempty"` - - // ArtifactType is the IANA media type of this artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // NewUnspecifiedField is not covered by image-spec. - // Registry implementations should still successfully store and serve - // manifests containing this data. - NewUnspecifiedField []byte `json:"newUnspecifiedField"` -} - -// platform describes the platform which the image in the manifest runs on. -type platform struct { - // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64le`. 
- Architecture string `json:"architecture"` - - // OS specifies the operating system, for example `linux` or `windows`. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` - - // Variant is an optional field specifying a variant of the CPU, for - // example `v7` to specify ARMv7 when architecture is `arm`. - Variant string `json:"variant,omitempty"` -} - -// rootFS describes a layer content addresses -type rootFS struct { - // Type is the type of the rootfs. - Type string `json:"type"` - - // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. - DiffIDs []digest.Digest `json:"diff_ids"` -} - -// image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type image struct { - // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. - Author string `json:"author,omitempty"` - - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // RootFS references the layer content addresses used by the image. - RootFS rootFS `json:"rootfs"` -} - -// index references manifests for various platforms. 
-// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. -type index struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []descriptor `json:"manifests"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/conformance/junit.go b/conformance/junit.go new file mode 100644 index 00000000..de385b07 --- /dev/null +++ b/conformance/junit.go @@ -0,0 +1,61 @@ +package main + +import "encoding/xml" + +const ( + junitPassed = "passed" // successful test + junitSkipped = "skipped" // test intentionally skipped + junitFailure = "failure" // test ran but failed, e.g. 
missed assertion + junitError = "error" // test encountered an unexpected error +) + +type junitProperty struct { + Name string `xml:"name,attr"` // name or key + Value string `xml:"value,attr"` // value of name +} + +type junitResult struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr,omitempty"` + Data string `xml:",cdata"` +} + +type junitTest struct { + Name string `xml:"name,attr"` // name of the test + Classname string `xml:"classname,attr"` // hierarch of test + Time string `xml:"time,attr,omitempty"` // duration in seconds + Status string `xml:"status,attr,omitempty"` // passed, skipped, failure, or error + Skipped *junitResult `xml:"skipped,omitempty"` // result from skipped tests + Failure *junitResult `xml:"failure,omitempty"` // result from test failures + Error *junitResult `xml:"error,omitempty"` // result from test errors + SystemOut string `xml:"system-out,omitempty"` // output written to stdout + SystemErr string `xml:"system-err,omitempty"` // output written to stderr +} + +type junitTestSuite struct { + Name string `xml:"name,attr"` // name of suite + Package string `xml:"package,attr,omitempty"` // hierarchy of suite + Tests int `xml:"tests,attr"` // count of tests + Failures int `xml:"failures,attr"` // count of failures + Errors int `xml:"errors,attr"` // count of errors + Disabled int `xml:"disabled,attr,omitempty"` // count of disabled tests + Skipped int `xml:"skipped,attr,omitempty"` // count of skipped tests + Time string `xml:"time,attr"` // duration in seconds + Timestamp string `xml:"timestamp,attr,omitempty"` // ISO8601 + Properties []junitProperty `xml:"properties>property,omitempty"` // mapping of key/value pairs associated with the test + Testcases []junitTest `xml:"testcase,omitempty"` // slice of tests + SystemOut string `xml:"system-out,omitempty"` // output written to stdout + SystemErr string `xml:"system-err,omitempty"` // output written to stderr +} + +type junitTestSuites struct { + XMLName xml.Name 
`xml:"testsuites"` // xml namespace and name + Name string `xml:"name,attr,omitempty"` // name of the collection of suites + Time string `xml:"time,attr,omitempty"` // duration in seconds + Tests int `xml:"tests,attr,omitempty"` // count of tests + Errors int `xml:"errors,attr,omitempty"` // count of errors + Failures int `xml:"failures,attr,omitempty"` // count of failures + Skipped int `xml:"skipped,attr,omitempty"` // count of skipped tests + Disabled int `xml:"disabled,attr,omitempty"` // count of disabled tests + Suites []junitTestSuite `xml:"testsuite,omitempty"` // slice of suites +} diff --git a/conformance/legacy_test.go b/conformance/legacy_test.go new file mode 100644 index 00000000..ca610e99 --- /dev/null +++ b/conformance/legacy_test.go @@ -0,0 +1,9 @@ +//go:build legacy || !unit_tests + +package main + +import "testing" + +func TestLegacy(t *testing.T) { + mainRun(true) +} diff --git a/conformance/main.go b/conformance/main.go new file mode 100644 index 00000000..e6a66f3e --- /dev/null +++ b/conformance/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "errors" + "fmt" + "os" + "path/filepath" +) + +func main() { + mainRun(false) +} + +func mainRun(legacy bool) { + // load config + c, err := configLoad() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to load config: %v\n", err) + return + } + c.Legacy = legacy + // run all tests + r, err := runnerNew(c) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to setup test: %v\n", err) + return + } + err = r.TestAll() + if err != nil && !errors.Is(err, errRegUnsupported) && !errors.Is(err, errAPITestFail) && + !errors.Is(err, errAPITestSkip) && !errors.Is(err, errAPITestDisabled) { + fmt.Fprintf(os.Stderr, "failed to run tests: %v", err) + } + // show results + r.Report(os.Stdout) + // generate reports + err = os.MkdirAll(c.ResultsDir, 0o755) + if err != nil { + fmt.Fprintf(os.Stderr, "failed create results directory: %v\n", err) + return + } + // write results.yaml + fh, err := 
os.Create(filepath.Join(c.ResultsDir, "results.yaml")) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to create results.yaml: %v\n", err) + return + } + err = r.ReportResultsYAML(fh) + _ = fh.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to generate results.yaml: %v\n", err) + return + } + // write junit.xml report + fh, err = os.Create(filepath.Join(c.ResultsDir, "junit.xml")) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to create junit.xml: %v\n", err) + return + } + err = r.ReportJunit(fh) + _ = fh.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to generate junit.xml: %v\n", err) + return + } + // write report.html + fh, err = os.Create(filepath.Join(c.ResultsDir, "report.html")) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to create report.html: %v\n", err) + return + } + err = r.ReportHTML(fh) + _ = fh.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to generate report.html: %v\n", err) + return + } + if c.Legacy { + fmt.Fprintf(os.Stderr, "WARNING: \"go test\" is deprecated. Please update to using \"go build\".\n") + } + if r.Results.Status != statusPass { + fmt.Fprintf(os.Stderr, "*** Conformance test detected a failure. ***\n") + os.Exit(1) + } +} diff --git a/conformance/reporter.go b/conformance/reporter.go deleted file mode 100644 index d5dcb5bb..00000000 --- a/conformance/reporter.go +++ /dev/null @@ -1,634 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package conformance - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "math" - "os" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/onsi/ginkgo/v2/types" -) - -const ( - suiteIndex = 1 - categoryIndex = 2 - setupString = "Setup" - htmlTemplate string = ` - - OCI Distribution Conformance Tests - - - - -

OCI Distribution Conformance Tests

- - - - - - - - - - - - - - - - - - - - - - - - - - - -
Summary -
- {{- if gt .NumPassed 0 -}} - - {{- if .AllPassed -}}All {{ end -}}{{ .NumPassed }} passed - {{- end -}} - {{- if gt .NumFailed 0 -}} - - {{- if .AllFailed -}}All {{ end -}}{{ .NumFailed }} failed - {{- end -}} - {{- if gt .NumSkipped 0 -}} - - {{- if .AllSkipped -}}All {{ end -}}{{ .NumSkipped }} skipped - {{- end -}} -
-
-
-
-
-
-
Start Time{{ .StartTimeString }}
End Time{{ .EndTimeString }}
Time Elapsed{{ .RunTime }}
Test Version{{ .Version }}
Configuration
- {{ range $i, $s := .EnvironmentVariables }} - {{ $s }}
- {{ end }} -
- -
- {{with .Suite}} - {{$suite := .M}} - {{range $i, $suiteKey := .Keys}} - {{$wf := index $suite $suiteKey}} - {{with $wf}} - {{ if .IsEnabled }} -

{{$suiteKey}}

-
- {{$workflow := .M}} - {{range $j, $workflowKey := .Keys}} -

{{$workflowKey}}

- {{$ctg := index $workflow $workflowKey}} - {{with $ctg}} - {{$category := .M}} - {{range $k, $categoryKey := .Keys}} - {{$s := index $category $categoryKey}} - {{if eq $s.State.String "failed"}} -
-
+
-

{{$s.Title}}

-
-
- -
-
{{$s.FailureMessage}}
-
-
- {{else if eq $s.State.String "passed"}} -
-
+
-

{{$s.Title}}

-
- -
- {{else if eq $s.State.String "skipped"}} -
-
+
-

{{$s.Title}}

-
- -
- {{else}} -
-
+
-

{{$s.Title}}

-
- -
- {{end}} - {{end}}
- {{end}} - {{end}} - {{end}} - {{end}} -
- {{end}} - {{end}} -
- - -` -) - -type ( - summaryMap struct { - M map[string]snapShotList - Keys []string - Size int - } - - suite struct { - M map[string]*workflow - Keys []string - Size int - } - - workflow struct { - M map[string]*category - IsEnabled bool - Keys []string - } - - category struct { - M map[string]specSnapshot - Keys []string - } - - specSnapshot struct { - types.SpecReport - ID int - Title string - Category string - Suite string - IsSetup bool - } - - snapShotList []specSnapshot - - httpDebugWriter struct { - CapturedOutput []string - debug bool - } - - httpDebugLogger struct { - l *log.Logger - w io.Writer - } - - HTMLReporter struct { - htmlReportFilename string - Suite suite - SpecSummaryMap summaryMap - EnvironmentVariables []string - Report types.Report - debugLogger *httpDebugWriter - debugIndex int - enabledMap map[string]bool - NumTotal int - NumPassed int - NumFailed int - NumSkipped int - PercentPassed int - PercentFailed int - PercentSkipped int - startTime time.Time - endTime time.Time - StartTimeString string - EndTimeString string - RunTime string - AllPassed bool - AllFailed bool - AllSkipped bool - Version string - } -) - -func (sm *summaryMap) Add(key string, sum *specSnapshot) { - sm.M[key] = append(sm.M[key], *sum) - sm.Size++ - - if !sm.containsKey(key) { - sm.Keys = append(sm.Keys, key) - } -} - -func (sm *summaryMap) containsKey(key string) bool { - var containsKey bool - for _, k := range sm.Keys { - if k == key { - containsKey = true - break - } - } - return containsKey -} - -func newHTTPDebugWriter(debug bool) *httpDebugWriter { - return &httpDebugWriter{debug: debug} -} - -func (writer *httpDebugWriter) Write(b []byte) (int, error) { - s := string(b) - writer.CapturedOutput = append(writer.CapturedOutput, s) - if writer.debug { - fmt.Println(s) - } - - return len(b), nil -} - -func newHTTPDebugLogger(f io.Writer) *httpDebugLogger { - debugLogger := &httpDebugLogger{w: f, l: log.New(f, "", log.Ldate|log.Lmicroseconds)} - return debugLogger 
-} - -func (l *httpDebugLogger) Errorf(format string, v ...interface{}) { - l.output("ERROR "+format, v...) -} - -func (l *httpDebugLogger) Warnf(format string, v ...interface{}) { - l.output("WARN "+format, v...) -} - -func (l *httpDebugLogger) Debugf(format string, v ...interface{}) { - l.output("DEBUG "+format, v...) -} - -var ( - redactRegexp = regexp.MustCompile(`(?i)("?\w*(authorization|token|state)\w*"?(:|=)\s*)(")?\s*((bearer|basic)? )?[^\s&"]*(")?`) - redactReplace = "$1$4$5*****$7" -) - -func (l *httpDebugLogger) output(format string, v ...interface{}) { - if len(v) == 0 { - l.l.Print(redactRegexp.ReplaceAllString(format, redactReplace)) - return - } - _, err := l.w.Write([]byte(redactRegexp.ReplaceAllString(fmt.Sprintf(format, v...), redactReplace))) - if err != nil { - l.Errorf(err.Error()) - } -} - -func newHTMLReporter(htmlReportFilename string) (h *HTMLReporter) { - enabledMap := map[string]bool{ - titlePull: true, - titlePush: true, - titleContentDiscovery: true, - titleContentManagement: true, - } - - if os.Getenv(envVarHideSkippedWorkflows) == "1" { - enabledMap = map[string]bool{ - titlePull: !userDisabled(pull), - titlePush: !userDisabled(push), - titleContentDiscovery: !userDisabled(contentDiscovery), - titleContentManagement: !userDisabled(contentManagement), - } - } - - varsToCheck := []string{ - envVarRootURL, - envVarNamespace, - envVarUsername, - envVarPassword, - envVarDebug, - envVarPull, - envVarPush, - envVarContentDiscovery, - envVarContentManagement, - envVarPushEmptyLayer, - envVarBlobDigest, - envVarManifestDigest, - envVarTagName, - envVarTagList, - envVarHideSkippedWorkflows, - envVarAuthScope, - envVarCrossmountNamespace, - } - envVars := []string{} - for _, v := range varsToCheck { - var replacement string - if envVar := os.Getenv(v); envVar != "" { - replacement = envVar - if strings.Contains(v, "PASSWORD") || strings.Contains(v, "USERNAME") { - replacement = "*****" - } - } else { - continue - } - envVars = append(envVars, - 
fmt.Sprintf("%s=%s", v, replacement)) - } - - return &HTMLReporter{ - htmlReportFilename: htmlReportFilename, - debugLogger: httpWriter, - enabledMap: enabledMap, - SpecSummaryMap: summaryMap{M: make(map[string]snapShotList)}, - Suite: suite{ - M: make(map[string]*workflow), - Keys: []string{}, - }, - EnvironmentVariables: envVars, - startTime: time.Now(), - StartTimeString: time.Now().Format("Jan 2 15:04:05.000 -0700 MST"), - Version: Version, - } -} - -func (reporter *HTMLReporter) afterReport(r types.SpecReport) { - b := new(bytes.Buffer) - for _, co := range httpWriter.CapturedOutput[reporter.debugIndex:] { - fmt.Fprintf(b, "%s\n", co) - } - r.CapturedStdOutErr = b.String() - reporter.debugIndex = len(reporter.debugLogger.CapturedOutput) - - ct := r.ContainerHierarchyTexts - suiteName, categoryName, titleText := ct[suiteIndex], ct[categoryIndex], r.LeafNodeText - suite := &reporter.Suite - //make the map of categories - if _, ok := suite.M[suiteName]; !ok { - suite.M[suiteName] = &workflow{M: make(map[string]*category), Keys: []string{}, - IsEnabled: reporter.enabledMap[suiteName]} - suite.Keys = append(suite.Keys, suiteName) - } - //make the map of snapshots - if _, ok := suite.M[suiteName].M[categoryName]; !ok { - suite.M[suiteName].M[categoryName] = &category{M: make(map[string]specSnapshot), Keys: []string{}} - z := suite.M[suiteName] - z.Keys = append(z.Keys, categoryName) - } - z := suite.M[suiteName].M[categoryName] - z.Keys = append(z.Keys, titleText) - - suite.M[suiteName].M[categoryName].M[titleText] = specSnapshot{ - SpecReport: r, - Suite: suiteName, - Category: categoryName, - Title: titleText, - ID: suite.Size, - IsSetup: (categoryName == setupString), - } - suite.Size++ -} - -func (reporter *HTMLReporter) endSuite(report types.Report) error { - if reporter.htmlReportFilename == "" { - // Reporting is disabled. 
- return nil - } - reporter.Report = report - reporter.endTime = time.Now() - reporter.EndTimeString = reporter.endTime.Format("Jan 2 15:04:05.000 -0700 MST") - reporter.RunTime = reporter.endTime.Sub(reporter.startTime).String() - reporter.NumTotal = len(report.SpecReports) - reporter.NumPassed = report.SpecReports.CountWithState(types.SpecStatePassed) - reporter.NumSkipped = report.SpecReports.CountWithState(types.SpecStateSkipped) - reporter.NumFailed = report.SpecReports.CountWithState(types.SpecStateFailed) - reporter.PercentPassed = getPercent(reporter.NumPassed, reporter.NumTotal) - reporter.PercentSkipped = getPercent(reporter.NumSkipped, reporter.NumTotal) - reporter.PercentFailed = getPercent(reporter.NumFailed, reporter.NumTotal) - reporter.AllPassed = reporter.NumPassed == reporter.NumTotal - reporter.AllSkipped = reporter.NumSkipped == reporter.NumTotal - reporter.AllFailed = reporter.NumFailed == reporter.NumTotal - - t, err := template.New("report").Parse(htmlTemplate) - if err != nil { - return fmt.Errorf("cannot parse report template: %v", err) - } - - htmlReportFilenameAbsPath, err := filepath.Abs(reporter.htmlReportFilename) - if err != nil { - return err - } - - htmlReportFile, err := os.Create(htmlReportFilenameAbsPath) - if err != nil { - return err - } - defer htmlReportFile.Close() - - err = t.ExecuteTemplate(htmlReportFile, "report", &reporter) - if err != nil { - return err - } - - fmt.Printf("\nHTML report was created: %s", htmlReportFilenameAbsPath) - return nil -} - -func getPercent(i, of int) int { - return int(math.Round(float64(i) / float64(of) * 100)) -} diff --git a/conformance/results.go b/conformance/results.go new file mode 100644 index 00000000..560dcd97 --- /dev/null +++ b/conformance/results.go @@ -0,0 +1,161 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + "time" +) + +type results struct { + Name string // name of current runner step, concatenated onto the parent's name + Children []*results + 
Parent *results + Status status + Errs []error + Output *bytes.Buffer + Start time.Time + Stop time.Time + Counts [statusMax]int +} + +func resultsNew(name string, parent *results) *results { + fullName := name + if parent != nil && parent.Name != "" { + fullName = fmt.Sprintf("%s/%s", parent.Name, name) + } + return &results{ + Name: fullName, + Parent: parent, + Output: &bytes.Buffer{}, + Start: time.Now(), + } +} + +func (r *results) Count(s string) int { + st := statusUnknown + err := st.UnmarshalText([]byte(s)) + if err != nil || st < 0 || st >= statusMax { + return -1 + } + return r.Counts[st] +} + +func (r *results) ReportWalkErr(w io.Writer, prefix string) { + _, _ = fmt.Fprintf(w, "%s%s: %s\n", prefix, r.Name, r.Status) + if len(r.Children) == 0 && len(r.Errs) > 0 { + // show errors from leaf nodes + for _, err := range r.Errs { + _, _ = fmt.Fprintf(w, "%s - %s\n", prefix, err.Error()) + } + } + if len(r.Children) > 0 { + for _, child := range r.Children { + child.ReportWalkErr(w, prefix+" ") + } + } +} + +func (r *results) ToJunitTestCases() []junitTest { + jTests := []junitTest{} + if len(r.Children) == 0 { + // return the test case for a leaf node + jTest := junitTest{ + Name: r.Name, + Time: fmt.Sprintf("%f", r.Stop.Sub(r.Start).Seconds()), + SystemErr: r.Output.String(), + Status: r.Status.ToJunit(), + } + if len(r.Errs) > 0 { + jTest.SystemOut = fmt.Sprintf("%v", errors.Join(r.Errs...)) + } + jTests = append(jTests, jTest) + } + if len(r.Children) > 0 { + // recursively collect test cases from child nodes + for _, child := range r.Children { + jTests = append(jTests, child.ToJunitTestCases()...) 
+ } + } + return jTests +} + +type status int + +const ( + statusUnknown status = iota // status is undefined + statusDisabled // test was disabled by configuration + statusSkip // test was skipped + statusPass // test passed + statusFail // test detected a conformance failure + statusError // failure of the test engine itself + statusMax // only used for allocating arrays +) + +func (s status) Set(set status) status { + // only set status to a higher level + if set > s { + return set + } + return s +} + +func (s status) String() string { + switch s { + case statusPass: + return "Pass" + case statusSkip: + return "Skip" + case statusDisabled: + return "Disabled" + case statusFail: + return "FAIL" + case statusError: + return "Error" + default: + return "Unknown" + } +} + +func (s status) MarshalText() ([]byte, error) { + ret := s.String() + if ret == "Unknown" { + return []byte(ret), fmt.Errorf("unknown status %d", s) + } + return []byte(ret), nil +} + +func (s *status) UnmarshalText(text []byte) error { + switch strings.ToLower(string(text)) { + case "pass": + *s = statusPass + case "skip": + *s = statusSkip + case "disabled": + *s = statusDisabled + case "fail": + *s = statusFail + case "error": + *s = statusError + case "unknown": + *s = statusUnknown + default: + return fmt.Errorf("unknown status %s", string(text)) + } + return nil +} + +func (s status) ToJunit() string { + switch s { + case statusPass: + return junitPassed + case statusSkip, statusDisabled: + return junitSkipped + case statusFail: + return junitFailure + default: + return junitError + } +} diff --git a/conformance/run.go b/conformance/run.go new file mode 100644 index 00000000..5ce662c3 --- /dev/null +++ b/conformance/run.go @@ -0,0 +1,2437 @@ +package main + +import ( + "crypto/rand" + "encoding/xml" + "errors" + "fmt" + "html/template" + "io" + "log/slog" + "math" + "net/http" + "os" + "slices" + "sort" + "strconv" + "strings" + "time" + + "github.com/goccy/go-yaml" + digest 
"github.com/opencontainers/go-digest" + image "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + testName = "OCI Conformance Test" +) + +var dataTests = []string{} + +type runner struct { + Config config + API *api + State *state + Results *results + Log *slog.Logger +} + +func runnerNew(c config) (*runner, error) { + lvl := slog.LevelWarn + if c.LogLevel != "" { + err := lvl.UnmarshalText([]byte(c.LogLevel)) + if err != nil { + return nil, fmt.Errorf("failed to parse logging level %s: %w", c.LogLevel, err) + } + } + if c.LogWriter == nil { + c.LogWriter = os.Stderr + } + apiOpts := []apiOpt{} + if c.LoginUser != "" && c.LoginPass != "" { + apiOpts = append(apiOpts, apiWithAuth(c.LoginUser, c.LoginPass, c.CacheAuth)) + } + r := runner{ + Config: c, + API: apiNew(http.DefaultClient, apiOpts...), + State: stateNew(), + Results: resultsNew(testName, nil), + Log: slog.New(slog.NewTextHandler(c.LogWriter, &slog.HandlerOptions{Level: lvl})), + } + for api := range stateAPIMax { + if err := r.APIRequire(api); errors.Is(err, errAPITestDisabled) { + r.State.APIStatus[api] = statusDisabled + } + } + return &r, nil +} + +func (r *runner) GenerateData() error { + var tdName string + if !r.Config.Data.Image { + // all data tests require the image manifest + return nil + } + // include empty tests for user provided read-only data, no validation is done on the content of the response since we don't know it + if len(r.Config.ROData.Tags) > 0 || len(r.Config.ROData.Manifests) > 0 || len(r.Config.ROData.Blobs) > 0 || len(r.Config.ROData.Referrers) > 0 { + tdName = "read-only" + r.State.Data[tdName] = newTestData("Read Only Inputs") + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + for _, tag := range r.Config.ROData.Tags { + r.State.Data[tdName].tags[tag] = "" + } + for _, manifest := range r.Config.ROData.Manifests { + dig, err := digest.Parse(manifest) + if err != nil { + return fmt.Errorf("failed to parse manifest digest %s: 
%w", manifest, err) + } + r.State.Data[tdName].manifests[dig] = []byte{} + r.State.Data[tdName].manOrder = append(r.State.Data[tdName].manOrder, dig) + } + for _, blob := range r.Config.ROData.Blobs { + dig, err := digest.Parse(blob) + if err != nil { + return fmt.Errorf("failed to parse blob digest %s: %w", blob, err) + } + r.State.Data[tdName].blobs[dig] = []byte{} + } + for _, subject := range r.Config.ROData.Referrers { + dig, err := digest.Parse(subject) + if err != nil { + return fmt.Errorf("failed to parse subject digest %s: %w", subject, err) + } + r.State.Data[tdName].referrers[dig] = []*image.Descriptor{} + } + } + if !r.Config.APIs.Push { + // do not generate random data if push is disabled + return nil + } + // standard image with a layer per blob test + tdName = "image" + r.State.Data[tdName] = newTestData("Image") + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err := r.State.Data[tdName].genManifestFull( + genWithTag("image"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + tdName = "image-uncompressed" + r.State.Data[tdName] = newTestData("Image Uncompressed") + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genManifestFull( + genWithTag("image-uncompressed"), + genWithCompress(genCompUncomp), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + // large manifest using a lot of annotations + tdName = "large-manifest" + r.State.Data[tdName] = newTestData("Image with Large Manifest") + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + largeAnnotations := map[string]string{} + for i := range 390 { + largeAnnotations[fmt.Sprintf("large-annotation-%d", i)] = strings.Repeat("A", 10000) + } + _, err = r.State.Data[tdName].genManifestFull( + genWithTag("large-manifest"), + genWithAnnotations(largeAnnotations), + ) + if err != nil { + 
return fmt.Errorf("failed to generate test data: %w", err) + } + // multi-platform index + tdName = "index" + r.State.Data[tdName] = newTestData("Index") + if r.Config.Data.Index { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genIndexFull( + genWithTag("index"), + genWithPlatforms([]*image.Platform{ + {OS: "linux", Architecture: "amd64"}, + {OS: "linux", Architecture: "arm64"}, + }), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // index containing an index + tdName = "nested-index" + r.State.Data[tdName] = newTestData("Nested Index") + if r.Config.Data.Index && r.Config.Data.IndexList { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + dig1, err := r.State.Data[tdName].genIndexFull( + genWithPlatforms([]*image.Platform{ + {OS: "linux", Architecture: "amd64"}, + {OS: "linux", Architecture: "arm64"}, + }), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + dig2, err := r.State.Data[tdName].genIndexFull( + genWithPlatforms([]*image.Platform{ + {OS: "linux", Architecture: "amd64"}, + {OS: "linux", Architecture: "arm64"}, + }), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + _, _, err = r.State.Data[tdName].genIndex([]*image.Platform{nil, nil}, []digest.Digest{dig1, dig2}, + genWithTag("index-of-index"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // empty index + tdName = "empty-index" + r.State.Data[tdName] = newTestData("Empty Index") + if r.Config.Data.Index { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genIndexFull( + genWithTag("index"), + genWithPlatforms([]*image.Platform{}), + ) + if err != nil { + 
return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // artifact manifest + tdName = "artifact" + r.State.Data[tdName] = newTestData("Artifact") + if r.Config.Data.Artifact { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genManifestFull( + genWithTag("artifact"), + genWithArtifactType(mtExampleConf1), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + genWithLayerMediaType(mtExampleConf1), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // artifact index + tdName = "artifact-index" + r.State.Data[tdName] = newTestData("Artifact Index") + if r.Config.Data.ArtifactList { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genIndexFull( + genWithTag("artifact-index"), + genWithPlatforms([]*image.Platform{ + {OS: "linux", Architecture: "amd64"}, + {OS: "linux", Architecture: "arm64"}, + }), + genWithArtifactType(mtExampleConf1), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + genWithLayerMediaType(mtExampleConf1), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // artifact without layers + tdName = "artifact-without-layers" + r.State.Data[tdName] = newTestData("Artifact without Layers") + if r.Config.Data.Artifact { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genManifestFull( + genWithTag("artifact-without-layers"), + genWithArtifactType(mtExampleConf1), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + 
genWithLayerBytes([]byte("{}")), + genWithLayerMediaType(mtOCIEmptyJSON), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // image and referrers + tdName = "artifacts-with-subject" + r.State.Data[tdName] = newTestData("Artifacts with Subject") + if r.Config.Data.Subject { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + subjDig, err := r.State.Data[tdName].genManifestFull( + genWithTag("image"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + subjDesc := *r.State.Data[tdName].desc[subjDig] + _, err = r.State.Data[tdName].genManifestFull( + genWithSubject(subjDesc), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + _, err = r.State.Data[tdName].genManifestFull( + genWithArtifactType(mtExampleConf1), + genWithAnnotations(map[string]string{ + "org.opencontainers.conformance": "hello conformance test", + }), + genWithAnnotationUniq(), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + genWithLayerMediaType(mtExampleConf1), + genWithSubject(subjDesc), + genWithTag("tagged-artifact1"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + _, err = r.State.Data[tdName].genManifestFull( + genWithArtifactType(mtExampleConf2), + genWithAnnotations(map[string]string{ + "org.opencontainers.conformance": "hello conformance test", + }), + genWithAnnotationUniq(), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + genWithLayerMediaType(mtOctetStream), + genWithSubject(subjDesc), + genWithTag("tagged-artifact2"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // index and artifact-index with a subject + tdName = 
"index-with-subject" + r.State.Data[tdName] = newTestData("Index with Subject") + if r.Config.Data.SubjectList { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + subjDig, err := r.State.Data[tdName].genIndexFull( + genWithTag("index"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + subjDesc := *r.State.Data[tdName].desc[subjDig] + _, err = r.State.Data[tdName].genIndexFull( + genWithArtifactType(mtExampleConf1), + genWithAnnotations(map[string]string{ + "org.opencontainers.conformance": "hello conformance test", + }), + genWithAnnotationUniq(), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + genWithLayerMediaType(mtExampleConf1), + genWithSubject(subjDesc), + genWithTag("tagged-artifact"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // artifact with missing subject + tdName = "missing-subject" + r.State.Data[tdName] = newTestData("Missing Subject") + if r.Config.Data.SubjectMissing { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + subjDesc := image.Descriptor{ + MediaType: mtOCIImage, + Size: 123, + Digest: digest.FromString("missing content"), + } + _, err = r.State.Data[tdName].genManifestFull( + genWithArtifactType(mtExampleConf1), + genWithAnnotations(map[string]string{ + "org.opencontainers.conformance": "hello conformance test", + }), + genWithAnnotationUniq(), + genWithConfigMediaType(mtOCIEmptyJSON), + genWithConfigBytes([]byte("{}")), + genWithLayerCount(1), + genWithLayerMediaType(mtExampleConf1), + genWithSubject(subjDesc), + genWithTag("tagged-artifact"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // data field in descriptor + tdName = "data-field" + r.State.Data[tdName] = 
newTestData("Data Field") + if r.Config.Data.DataField { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err := r.State.Data[tdName].genManifestFull( + genWithTag("data-field"), + genWithDescriptorData(), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // image with non-distributable layers + tdName = "non-distributable-layers" + r.State.Data[tdName] = newTestData("Non-distributable Layers") + if r.Config.Data.Nondistributable { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + + b := make([]byte, 256) + layers := make([]image.Descriptor, 3) + confDig := make([]digest.Digest, 3) + // first layer is compressed + non-distributable + _, err := rand.Read(b) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + confDig[0] = digest.Canonical.FromBytes(b) + _, err = rand.Read(b) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + dig := digest.Canonical.FromBytes(b) + layers[0] = image.Descriptor{ + MediaType: mtOCILayerNdGz, + Digest: dig, + Size: 123456, + URLs: []string{"https://store.example.com/blobs/sha256/" + dig.Encoded()}, + } + // second layer is uncompressed + non-distributable + _, err = rand.Read(b) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + dig = digest.Canonical.FromBytes(b) + confDig[1] = dig + layers[1] = image.Descriptor{ + MediaType: mtOCILayerNd, + Digest: dig, + Size: 12345, + URLs: []string{"https://store.example.com/blobs/sha256/" + dig.Encoded()}, + } + // third layer is normal + cDig, ucDig, _, err := r.State.Data[tdName].genLayer(1) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + confDig[2] = ucDig + layers[2] = *r.State.Data[tdName].desc[cDig] + // generate the config + cDig, _, err = 
r.State.Data[tdName].genConfig(image.Platform{OS: "linux", Architecture: "amd64"}, confDig) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + // generate the manifest + _, _, err = r.State.Data[tdName].genManifest(*r.State.Data[tdName].desc[cDig], layers, + genWithTag("non-distributable-image"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // add a randomized unknown field to manifests and config + tdName = "custom-fields" + r.State.Data[tdName] = newTestData("Custom Fields") + if r.Config.Data.CustomFields { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genIndexFull( + genWithTag("custom-fields"), + genWithPlatforms([]*image.Platform{ + {OS: "linux", Architecture: "amd64"}, + {OS: "linux", Architecture: "arm64"}, + }), + genWithExtraField(), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // image with an empty layer list + tdName = "no-layers" + r.State.Data[tdName] = newTestData("No Layers") + if r.Config.Data.NoLayers { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err := r.State.Data[tdName].genManifestFull( + genWithTag("no-layers"), + genWithLayerCount(0), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // sparse manifests missing layers/platforms + tdName = "sparse" + r.State.Data[tdName] = newTestData("Sparse Manifests") + if r.Config.Data.Sparse { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err := r.State.Data[tdName].genManifestFull( + genWithTag("sparse-image"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + for dig 
:= range r.State.Data[tdName].blobs { + if strings.HasPrefix(r.State.Data[tdName].desc[dig].MediaType, mtOCILayerPre) { + // remove the first layer we find + delete(r.State.Data[tdName].desc, dig) + delete(r.State.Data[tdName].blobs, dig) + break + } + } + // for the index, make an image and a random digest/descriptor, add both to an index + imagePlat := image.Platform{ + OS: "linux", + Architecture: "amd64", + } + imageDig, err := r.State.Data[tdName].genManifestFull( + genWithPlatform(imagePlat), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + randPlat := image.Platform{ + OS: "linux", + Architecture: "arm64", + } + b := make([]byte, 1024) + _, err = rand.Read(b) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + randDig := digest.Canonical.FromBytes(b) + r.State.Data[tdName].desc[randDig] = &image.Descriptor{ + MediaType: mtOCIImage, + Digest: randDig, + Size: 1024, + } + _, _, err = r.State.Data[tdName].genIndex( + []*image.Platform{&imagePlat, &randPlat}, + []digest.Digest{imageDig, randDig}, + genWithTag("sparse-index"), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // non-canonical digests require tag parameters to push tags, so only test by digest + tdName = "sha512" + r.State.Data[tdName] = newTestData("Digest Algorithm sha512") + if r.Config.Data.Sha512 { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err := r.State.Data[tdName].genIndexFull( + genWithAlgo(digest.SHA512), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // push using tag parameters and sha256 digest + tdName = "tag-param-sha256" + r.State.Data[tdName] = newTestData("Tag Param") + if r.Config.APIs.Manifests.TagParam { + r.State.DataStatus[tdName] = statusUnknown + 
dataTests = append(dataTests, tdName) + _, err = r.State.Data[tdName].genIndexFull( + genWithTag("tag-param-sha256"), // top level index will have two tags + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + for i, dig := range r.State.Data[tdName].manOrder { + r.State.Data[tdName].tags[fmt.Sprintf("%s-%d", "tag-param-sha256", i)] = dig + r.State.Data[tdName].pushOpts[dig] = []apiDoOpt{apiWithFlag("TagParam")} + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + // push using tag parameters and sha512 digest + tdName = "tag-param-sha512" + r.State.Data[tdName] = newTestData("Tag Param sha512") + if r.Config.APIs.Manifests.TagParam && r.Config.Data.Sha512 { + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + _, err := r.State.Data[tdName].genIndexFull( + genWithTag("tag-param-sha512"), // top level index will have two tags + genWithAlgo(digest.SHA512), + ) + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + for i, dig := range r.State.Data[tdName].manOrder { + r.State.Data[tdName].tags[fmt.Sprintf("%s-%d", "tag-param-sha512", i)] = dig + r.State.Data[tdName].pushOpts[dig] = []apiDoOpt{apiWithFlag("TagParam")} + } + } else { + r.State.DataStatus[tdName] = statusDisabled + } + tdName = "bad-digest-image" + r.State.Data[tdName] = newTestData("Bad Digest Image") + r.State.DataStatus[tdName] = statusUnknown + dataTests = append(dataTests, tdName) + dig, err := r.State.Data[tdName].genManifestFull() + if err != nil { + return fmt.Errorf("failed to generate test data: %w", err) + } + r.State.Data[tdName].pullOpts[dig] = []apiDoOpt{apiWithFlag("SkipPullTest")} + r.State.Data[tdName].pushOpts[dig] = []apiDoOpt{apiWithFlag("ExpectBadDigest")} + // add some whitespace to make the digest mismatch + r.State.Data[tdName].manifests[dig] = append(r.State.Data[tdName].manifests[dig], []byte(" ")...) 
+ return nil +} + +func (r *runner) Report(w io.Writer) { + _, _ = fmt.Fprintf(w, "Test results\n") + r.Results.ReportWalkErr(w, "") + _, _ = fmt.Fprintf(w, "\n") + + _, _ = fmt.Fprintf(w, "Configuration:\n") + _, _ = fmt.Fprintf(w, " %s", strings.ReplaceAll(r.Config.Report(), "\n", "\n ")) + _, _ = fmt.Fprintf(w, "\n") + + _, _ = fmt.Fprintf(w, "OCI Conformance Result: %s\n", r.Results.Status.String()) + padWidth := 30 + + statusTotal := 0 + for i := status(1); i < statusMax; i++ { + pad := "" + if len(i.String()) < padWidth { + pad = strings.Repeat(".", padWidth-len(i.String())) + } + _, _ = fmt.Fprintf(w, " %s%s: %10d\n", i.String(), pad, r.Results.Counts[i]) + statusTotal += r.Results.Counts[i] + } + pad := strings.Repeat(".", padWidth-len("Total")) + _, _ = fmt.Fprintf(w, " %s%s: %10d\n\n", "Total", pad, statusTotal) + + if len(r.Results.Errs) > 0 { + _, _ = fmt.Fprintf(w, "Errors:\n%s\n\n", errors.Join(r.Results.Errs...)) + } + + _, _ = fmt.Fprintf(w, "API conformance:\n") + for i := range stateAPIMax { + pad := "" + if len(i.String()) < padWidth { + pad = strings.Repeat(".", padWidth-len(i.String())) + } + _, _ = fmt.Fprintf(w, " %s%s: %10s\n", i.String(), pad, r.State.APIStatus[i].String()) + } + _, _ = fmt.Fprintf(w, "\n") + + _, _ = fmt.Fprintf(w, "Data conformance:\n") + tdNames := []string{} + for tdName := range r.State.Data { + tdNames = append(tdNames, tdName) + } + sort.Strings(tdNames) + for _, tdName := range tdNames { + pad := "" + if len(r.State.Data[tdName].name) < padWidth { + pad = strings.Repeat(".", padWidth-len(r.State.Data[tdName].name)) + } + _, _ = fmt.Fprintf(w, " %s%s: %10s\n", r.State.Data[tdName].name, pad, r.State.DataStatus[tdName].String()) + } + _, _ = fmt.Fprintf(w, "\n") +} + +func (r *runner) ReportJunit(w io.Writer) error { + ju := r.toJunit() + enc := xml.NewEncoder(w) + enc.Indent("", " ") + return enc.Encode(ju) +} + +func (r *runner) toJunit() *junitTestSuites { + statusTotal := 0 + for i := status(1); i < statusMax; i++ 
{ + statusTotal += r.Results.Counts[i] + } + tSec := fmt.Sprintf("%f", r.Results.Stop.Sub(r.Results.Start).Seconds()) + jTSuites := junitTestSuites{ + Tests: statusTotal, + Errors: r.Results.Counts[statusError], + Failures: r.Results.Counts[statusFail], + Skipped: r.Results.Counts[statusSkip], + Disabled: r.Results.Counts[statusDisabled], + Time: tSec, + } + jTSuite := junitTestSuite{ + Name: r.Results.Name, + Tests: statusTotal, + Errors: r.Results.Counts[statusError], + Failures: r.Results.Counts[statusFail], + Skipped: r.Results.Counts[statusSkip], + Disabled: r.Results.Counts[statusDisabled], + Time: tSec, + Testcases: r.Results.ToJunitTestCases(), + } + jTSuite.Properties = []junitProperty{{Name: "Config", Value: r.Config.Report()}} + jTSuites.Suites = []junitTestSuite{jTSuite} + return &jTSuites +} + +type reportData struct { + Config config + Results *results + NumTotal int + NumPassed int + NumFailed int + NumSkipped int + PercentPassed int + PercentFailed int + PercentSkipped int + StartTimeString string + EndTimeString string + RunTime string + AllPassed bool + AllFailed bool + AllSkipped bool + Version string +} + +func (r *runner) ReportHTML(w io.Writer) error { + data := reportData{ + Config: r.Config, + Results: r.Results, + NumTotal: r.Results.Counts[statusPass] + r.Results.Counts[statusFail] + r.Results.Counts[statusError] + r.Results.Counts[statusSkip] + r.Results.Counts[statusDisabled], + NumPassed: r.Results.Counts[statusPass], + NumFailed: r.Results.Counts[statusFail] + r.Results.Counts[statusError], + NumSkipped: r.Results.Counts[statusSkip] + r.Results.Counts[statusDisabled], + StartTimeString: r.Results.Start.Format("Jan 2 15:04:05.000 -0700 MST"), + EndTimeString: r.Results.Stop.Format("Jan 2 15:04:05.000 -0700 MST"), + RunTime: r.Results.Stop.Sub(r.Results.Start).String(), + } + data.PercentPassed = int(math.Round(float64(data.NumPassed) / float64(data.NumTotal) * 100)) + data.PercentFailed = int(math.Round(float64(data.NumFailed) / 
float64(data.NumTotal) * 100)) + data.PercentSkipped = int(math.Round(float64(data.NumSkipped) / float64(data.NumTotal) * 100)) + data.AllPassed = data.NumPassed == data.NumTotal + data.AllFailed = data.NumFailed == data.NumTotal + data.AllSkipped = data.NumSkipped == data.NumTotal + data.Version = r.Config.Version + // load all templates + t := template.New("report") + for name, value := range confHTMLTemplates { + tAdd, err := template.New(name).Parse(value) + if err != nil { + return fmt.Errorf("cannot parse report template %s: %v", name, err) + } + t, err = t.AddParseTree(name, tAdd.Tree) + if err != nil { + return fmt.Errorf("cannot add report template %s to tree: %v", name, err) + } + } + // execute the top level report template + return t.ExecuteTemplate(w, "report", data) +} + +func (r *runner) ReportResultsYAML(w io.Writer) error { + results := struct { + Config config `yaml:"config"` + APIs map[stateAPIType]status `yaml:"apis"` + Data map[string]status `yaml:"data"` + }{ + Config: r.Config.Redact(), + APIs: r.State.APIStatus, + Data: map[string]status{}, + } + for k, v := range r.State.DataStatus { + results.Data[r.State.Data[k].name] = v + } + return yaml.NewEncoder(w).Encode(results) +} + +func (r *runner) TestAll() error { + errs := []error{} + r.Results.Start = time.Now() + repo := r.Config.Repo1 + repo2 := r.Config.Repo2 + + err := r.GenerateData() + if err != nil { + return fmt.Errorf("aborting tests, unable to generate data: %w", err) + } + + err = r.TestPing(r.Results) + if err != nil { + errs = append(errs, err) + } + + err = r.TestEmpty(r.Results, repo) + if err != nil { + errs = append(errs, err) + } + + algos := []digest.Algorithm{digest.SHA256} + if r.Config.Data.Sha512 { + algos = append(algos, digest.SHA512) + } else { + tdName := "blobs-" + digest.SHA512.String() + r.State.Data[tdName] = newTestData("Blobs " + digest.SHA512.String()) + r.State.DataStatus[tdName] = statusDisabled + } + for _, algo := range algos { + err = 
r.TestBlobAPIs(r.Results, "blobs-"+algo.String(), "Blobs "+algo.String(), algo, repo, repo2) + if err != nil { + errs = append(errs, err) + } + } + + // loop over different types of data + for _, tdName := range dataTests { + if r.State.DataStatus[tdName] == statusDisabled { + continue + } + err = r.ChildRun(tdName, r.Results, func(r *runner, res *results) error { + errs := []error{} + // push + err := r.TestPush(res, tdName, repo) + if err != nil { + errs = append(errs, err) + } + // list, pull, and query + err = r.TestList(res, tdName, repo) + if err != nil { + errs = append(errs, err) + } + err = r.TestHead(res, tdName, repo) + if err != nil { + errs = append(errs, err) + } + err = r.TestPull(res, tdName, repo) + if err != nil { + errs = append(errs, err) + } + err = r.TestReferrers(res, tdName, repo) + if err != nil { + errs = append(errs, err) + } + // delete + err = r.TestDelete(res, tdName, repo) + if err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) + }) + if err != nil { + errs = append(errs, err) + } + } + + // various manifest error conditions + err = r.TestManifestErrors(r.Results, repo) + if err != nil { + errs = append(errs, err) + } + + r.Results.Stop = time.Now() + + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + return nil +} + +func (r *runner) TestBlobAPIs(parent *results, tdName, tdDesc string, algo digest.Algorithm, repo, repo2 string) error { + return r.ChildRun(algo.String()+" blobs", parent, func(r *runner, res *results) error { + if err := r.APIRequire(stateAPIBlobPush); err != nil { + r.TestSkip(res, err, tdName, stateAPIBlobPush) + return fmt.Errorf("%.0w%w", errAPITestSkip, err) + } + errs := []error{} + r.State.Data[tdName] = newTestData(tdDesc) + r.State.DataStatus[tdName] = statusUnknown + digests := map[string]digest.Digest{} + if _, ok := blobAPIsTestedByAlgo[algo]; !ok { + blobAPIsTestedByAlgo[algo] = &[stateAPIMax]bool{} + } + blobAPITests := []string{"post only", "post+put", "chunked single", "stream", "mount", "mount anonymous", "mount missing", "post cancel"} + for _, name := range blobAPITests { + dig, _, err := r.State.Data[tdName].genBlob(genWithBlobSize(512), genWithAlgo(algo)) + if err != nil { + return fmt.Errorf("failed to generate blob: %w", err) + } + digests[name] = dig + } + // try pulling a blob that has not been pushed + err := r.ChildRun("get-missing", res, func(r *runner, res *results) error { + if err := r.APIRequire(stateAPIBlobGetFull); err != nil { + r.TestSkip(res, err, tdName, stateAPIBlobGetFull) + return fmt.Errorf("%.0w%w", errAPITestSkip, err) + } + if err := r.API.BlobGetReq(r.Config.schemeReg, repo, digests["post cancel"], r.State.Data[tdName], apiExpectStatus(http.StatusNotFound), apiSaveOutput(res.Output)); err != nil { + r.TestFail(res, err, tdName, stateAPIBlobGetFull) + return fmt.Errorf("%.0w%w", errAPITestFail, err) + } + r.TestPass(res, tdName, stateAPIBlobGetFull) + return nil + }) + if err != nil { + errs = append(errs, err) + } + blobAPITests = append(blobAPITests, "chunked multi", "chunked multi and put chunk", "chunked out-of-order", "chunked out-of-order and put chunk") + minChunkSize := int64(chunkMin) + minHeader := "" + // test the various blob push APIs + for _, testName := range blobAPITests { + err 
:= r.ChildRun(testName, res, func(r *runner, res *results) error { + var err error + errs := []error{} + dig := digests[testName] + var api stateAPIType + switch testName { + case "post only": + api = stateAPIBlobPostOnly + err = r.TestPushBlobPostOnly(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + case "post+put": + api = stateAPIBlobPostPut + err = r.TestPushBlobPostPut(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + case "chunked single": + api = stateAPIBlobPatchChunked + // extract the min chunk length from a chunked push with a single chunk + err = r.TestPushBlobPatchChunked(res, tdName, repo, dig, apiReturnHeader("OCI-Chunk-Min-Length", &minHeader)) + if err != nil { + errs = append(errs, err) + } + if minHeader != "" { + minParse, err := strconv.Atoi(minHeader) + if err == nil && int64(minParse) > minChunkSize { + minChunkSize = int64(minParse) + } + } + case "chunked multi": + api = stateAPIBlobPatchChunked + // generate a blob large enough to span three chunks + dig, _, err = r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo)) + if err != nil { + return fmt.Errorf("failed to generate chunked blob of size %d: %w", minChunkSize*3-5, err) + } + digests[testName] = dig + err = r.TestPushBlobPatchChunked(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + case "chunked multi and put chunk": + api = stateAPIBlobPatchChunked + // generate a blob large enough to span three chunks + dig, _, err = r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo)) + if err != nil { + return fmt.Errorf("failed to generate chunked blob of size %d: %w", minChunkSize*3-5, err) + } + digests[testName] = dig + err = r.TestPushBlobPatchChunked(res, tdName, repo, dig, apiWithFlag("PutLastChunk")) + if err != nil { + errs = append(errs, err) + } + case "chunked out-of-order": + api = stateAPIBlobPatchChunked + // generate a blob large enough to span 
three chunks + dig, _, err = r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo)) + if err != nil { + return fmt.Errorf("failed to generate chunked blob of size %d: %w", minChunkSize*3-5, err) + } + digests[testName] = dig + err = r.TestPushBlobPatchChunked(res, tdName, repo, dig, apiWithFlag("OutOfOrderChunks")) + if err != nil { + errs = append(errs, err) + } + case "chunked out-of-order and put chunk": + api = stateAPIBlobPatchChunked + // generate a blob large enough to span three chunks + dig, _, err = r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo)) + if err != nil { + return fmt.Errorf("failed to generate chunked blob of size %d: %w", minChunkSize*3-5, err) + } + digests[testName] = dig + err = r.TestPushBlobPatchChunked(res, tdName, repo, dig, apiWithFlag("PutLastChunk"), apiWithFlag("OutOfOrderChunks")) + if err != nil { + errs = append(errs, err) + } + case "stream": + api = stateAPIBlobPatchStream + err = r.TestPushBlobPatchStream(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + case "post cancel": + api = stateAPIBlobPostPut + err = r.TestPushBlobPostCancel(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + case "mount": + api = stateAPIBlobMountSource + // first push to repo2 + err = r.TestPushBlobAny(res, tdName, repo2, dig) + if err != nil { + errs = append(errs, err) + } + // then mount repo2 to repo + err = r.TestPushBlobMount(res, tdName, repo, repo2, dig) + if err != nil { + errs = append(errs, err) + } + case "mount anonymous": + api = stateAPIBlobMountAnonymous + // first push to repo2 + err = r.TestPushBlobAny(res, tdName, repo2, dig) + if err != nil { + errs = append(errs, err) + } + // then mount repo2 to repo + err = r.TestPushBlobMountAnonymous(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + case "mount missing": + // mount repo2 to repo without first pushing there + err = 
r.TestPushBlobMountMissing(res, tdName, repo, repo2, dig) + if err != nil { + errs = append(errs, err) + } + default: + return fmt.Errorf("unknown api test %s", testName) + } + // track the used APIs so TestPushBlobAny doesn't rerun tests + blobAPIsTested[api] = true + blobAPIsTestedByAlgo[dig.Algorithm()][api] = true + if err == nil && testName != "post cancel" { + // head request + err = r.TestHeadBlob(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + // pull each blob + err = r.TestPullBlob(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + } + // cleanup + err = r.TestDeleteBlob(res, tdName, repo, dig) + if err != nil { + errs = append(errs, err) + } + if testName == "mount" || testName == "mount anonymous" { + err = r.TestDeleteBlob(res, tdName, repo2, dig) + if err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) + }) + if err != nil { + errs = append(errs, err) + } + } + // verify support for range requests + err = r.ChildRun("range requests", res, func(r *runner, res *results) error { + if err := r.APIRequire(stateAPIBlobGetRange, stateAPIBlobPush); err != nil { + r.TestSkip(res, err, tdName, stateAPIBlobGetRange) + return fmt.Errorf("%.0w%w", errAPITestSkip, err) + } + // setup by pushing a blob, any failures will return immediately + blobLen := int64(2048) + blobLenStr := fmt.Sprintf("%d", blobLen) + dig, blobBody, err := r.State.Data[tdName].genBlob(genWithBlobSize(blobLen), genWithAlgo(algo)) + if err != nil { + return err + } + if err := r.TestPushBlobAny(res, tdName, repo, dig); err != nil { + r.TestSkip(res, err, tdName, stateAPIBlobGetRange) + return err + } + errs := []error{} + rangeTests := []struct { + name string + reqOpts []apiDoOpt + respOpts []apiDoOpt // response opts are separated to run them conditionally with a fallback + }{ + { + name: "range 500-1499", + reqOpts: []apiDoOpt{ + apiWithHeaderAdd("Range", "bytes=500-1499"), + }, + respOpts: []apiDoOpt{ + 
apiExpectBody(blobBody[500:1500]), + apiExpectStatus(http.StatusPartialContent), + apiExpectHeader("Content-Length", "1000"), + apiWithOr( + []apiDoOpt{apiExpectHeader("Content-Range", "bytes 500-1499/"+blobLenStr)}, + []apiDoOpt{apiExpectHeader("Content-Range", "bytes 500-1499/*")}, + ), + }, + }, + { + name: "range 500-", + reqOpts: []apiDoOpt{ + apiWithHeaderAdd("Range", "bytes=500-"), + }, + respOpts: []apiDoOpt{ + apiExpectBody(blobBody[500:]), + apiExpectStatus(http.StatusPartialContent), + apiExpectHeader("Content-Length", fmt.Sprintf("%d", blobLen-500)), + apiWithOr( + []apiDoOpt{apiExpectHeader("Content-Range", fmt.Sprintf("bytes 500-%d/%d", blobLen-1, blobLen))}, + []apiDoOpt{apiExpectHeader("Content-Range", fmt.Sprintf("bytes 500-%d/*", blobLen-1))}, + ), + }, + }, + { + name: "range -500", + reqOpts: []apiDoOpt{ + apiWithHeaderAdd("Range", "bytes=-500"), + }, + respOpts: []apiDoOpt{ + apiExpectBody(blobBody[blobLen-500:]), + apiExpectStatus(http.StatusPartialContent), + apiExpectHeader("Content-Length", "500"), + apiWithOr( + []apiDoOpt{apiExpectHeader("Content-Range", fmt.Sprintf("bytes %d-%d/%d", blobLen-500, blobLen-1, blobLen))}, + []apiDoOpt{apiExpectHeader("Content-Range", fmt.Sprintf("bytes %d-%d/*", blobLen-500, blobLen-1))}, + ), + }, + }, + { + name: "range 2000-5000", + reqOpts: []apiDoOpt{ + apiWithHeaderAdd("Range", "bytes=2000-5000"), + }, + respOpts: []apiDoOpt{ + apiExpectBody(blobBody[2000:]), + apiExpectStatus(http.StatusPartialContent), + apiExpectHeader("Content-Length", fmt.Sprintf("%d", blobLen-2000)), + apiWithOr( + []apiDoOpt{apiExpectHeader("Content-Range", fmt.Sprintf("bytes %d-%d/%d", 2000, blobLen-1, blobLen))}, + []apiDoOpt{apiExpectHeader("Content-Range", fmt.Sprintf("bytes %d-%d/*", 2000, blobLen-1))}, + ), + }, + }, + { + name: "range 500-0", + reqOpts: []apiDoOpt{ + apiWithHeaderAdd("Range", "bytes=500-0"), + }, + respOpts: []apiDoOpt{ + apiExpectStatus(http.StatusRequestedRangeNotSatisfiable), + }, + }, + { + name: 
			"range 5000-10000",
			reqOpts: []apiDoOpt{
				apiWithHeaderAdd("Range", "bytes=5000-10000"),
			},
			respOpts: []apiDoOpt{
				apiExpectStatus(http.StatusRequestedRangeNotSatisfiable),
			},
		},
	}
	// run each range request case; a registry without range support may fall
	// back to returning the full blob, which is detected below via the status.
	for _, rt := range rangeTests {
		err := r.ChildRun(rt.name, res, func(r *runner, res *results) error {
			var status int
			rangeOpts := []apiDoOpt{
				apiSaveOutput(res.Output),
				apiWithAnd(rt.reqOpts),
				apiWithOr(rt.respOpts,
					[]apiDoOpt{ // if rt.opts fails, it may fall back to a standard blob pull
						apiExpectStatus(http.StatusOK),
						apiExpectHeader("Content-Length", blobLenStr),
						apiExpectBody(blobBody),
						apiReturnStatus(&status),
					}),
			}
			if err := r.API.BlobGetReq(r.Config.schemeReg, repo, dig, r.State.Data[tdName], rangeOpts...); err != nil {
				r.TestFail(res, err, tdName, stateAPIBlobGetRange)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			// detect a fallback
			if status == http.StatusOK {
				err := fmt.Errorf("range request unsupported, full blob returned%.0w", errRegUnsupported)
				r.TestFail(res, err, tdName, stateAPIBlobGetRange)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			r.TestPass(res, tdName, stateAPIBlobGetRange)
			return nil
		})
		if err != nil {
			errs = append(errs, err)
		}
	}
	if err := r.TestDeleteBlob(res, tdName, repo, dig); err != nil {
		errs = append(errs, err)
	}
	return errors.Join(errs...)
	})
	if err != nil {
		errs = append(errs, err)
	}
	// test various well known blob contents
	blobDataTests := map[string][]byte{}
	if r.Config.Data.EmptyBlob {
		blobDataTests["empty"] = []byte("")
	}
	blobDataTests["emptyJSON"] = []byte("{}")
	for name, val := range blobDataTests {
		dig := algo.FromBytes(val)
		digests[name] = dig
		r.State.Data[tdName].blobs[dig] = val
	}
	// push/head/pull/delete each of the well known blobs
	for name := range blobDataTests {
		err := r.ChildRun(name, res, func(r *runner, res *results) error {
			dig := digests[name]
			err := r.TestPushBlobAny(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, err)
			}
			err = r.TestHeadBlob(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, err)
			}
			err = r.TestPullBlob(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, err)
			}
			err = r.TestDeleteBlob(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, err)
			}
			return errors.Join(errs...)
		})
		if err != nil {
			errs = append(errs, err)
		}
	}
	// test the various blob push APIs with a bad digest
	blobAPIBadDigTests := []string{"bad digest post only", "bad digest post+put", "bad digest chunked", "bad digest chunked and put chunk", "bad digest stream"}
	for _, name := range blobAPIBadDigTests {
		dig, _, err := r.State.Data[tdName].genBlob(genWithBlobSize(minChunkSize*3-5), genWithAlgo(algo))
		if err != nil {
			return fmt.Errorf("failed to generate blob: %w", err)
		}
		// corrupt the blob bytes
		r.State.Data[tdName].blobs[dig] = append(r.State.Data[tdName].blobs[dig], []byte("oh no")...)
		digests[name] = dig
	}
	optBadDig := apiWithFlag("ExpectBadDigest")
	// each push API must reject an upload whose content does not match the digest
	for _, testName := range blobAPIBadDigTests {
		err := r.ChildRun(testName, res, func(r *runner, res *results) error {
			dig := digests[testName]
			switch testName {
			case "bad digest post only":
				return r.TestPushBlobPostOnly(res, tdName, repo, dig, optBadDig)
			case "bad digest post+put":
				return r.TestPushBlobPostPut(res, tdName, repo, dig, optBadDig)
			case "bad digest chunked":
				return r.TestPushBlobPatchChunked(res, tdName, repo, dig, optBadDig)
			case "bad digest chunked and put chunk":
				// NOTE(review): identical to the "bad digest chunked" case above —
				// confirm whether a separate put-chunk variant was intended here.
				return r.TestPushBlobPatchChunked(res, tdName, repo, dig, optBadDig)
			case "bad digest stream":
				return r.TestPushBlobPatchStream(res, tdName, repo, dig, optBadDig)
			default:
				return fmt.Errorf("unknown api test %s", testName)
			}
		})
		if err != nil {
			errs = append(errs, err)
		}
	}

	return errors.Join(errs...)
	})
}

// TestDelete removes the pushed content for the named test data: tags first,
// then manifests in reverse push order, then blobs. Errors are collected and
// joined so every delete is attempted.
func (r *runner) TestDelete(parent *results, tdName string, repo string) error {
	return r.ChildRun("delete", parent, func(r *runner, res *results) error {
		errs := []error{}
		delOrder := slices.Clone(r.State.Data[tdName].manOrder)
		slices.Reverse(delOrder)
		for tag, dig := range r.State.Data[tdName].tags {
			err := r.TestDeleteTag(res, tdName, repo, tag, dig)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to delete manifest tag %s%.0w", tag, err))
			}
		}
		for i, dig := range delOrder {
			err := r.TestDeleteManifest(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to delete manifest %d, digest %s%.0w", i, dig.String(), err))
			}
		}
		for dig := range r.State.Data[tdName].blobs {
			err := r.TestDeleteBlob(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to delete blob %s%.0w", dig.String(), err))
			}
		}
		if len(errs) > 0 {
			return errors.Join(errs...)
		}
		return nil
	})
}

// TestDeleteTag deletes a tag and then verifies the delete was atomic by
// expecting a 404 on an immediate HEAD of the same tag. Skipped when the tag
// was never pushed.
func (r *runner) TestDeleteTag(parent *results, tdName string, repo string, tag string, dig digest.Digest) error {
	td := r.State.Data[tdName]
	if !td.tagPushed[tag] {
		return nil // tag was not pushed so skip the attempt to delete it
	}
	return r.ChildRun("tag-delete", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPITagDelete); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPITagDelete, stateAPITagDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.ManifestDelete(r.Config.schemeReg, repo, tag, dig, td, apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, tdName, stateAPITagDelete)
			r.TestSkip(res, err, tdName, stateAPITagDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPITagDelete)
		// verify tag delete finished immediately
		if err := r.APIRequire(stateAPITagDeleteAtomic); err != nil {
			r.TestSkip(res, err, tdName, stateAPITagDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.ManifestHeadReq(r.Config.schemeReg, repo, tag, dig, td, apiSaveOutput(res.Output), apiExpectStatus(http.StatusNotFound)); err != nil {
			r.TestFail(res, err, tdName, stateAPITagDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPITagDeleteAtomic)
		return nil
	})
}

// TestDeleteManifest deletes a manifest by digest and verifies the delete was
// atomic (immediate HEAD of the digest returns 404).
func (r *runner) TestDeleteManifest(parent *results, tdName string, repo string, dig digest.Digest) error {
	td := r.State.Data[tdName]
	return r.ChildRun("manifest-delete", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIManifestDelete); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPIManifestDelete, stateAPIManifestDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.ManifestDelete(r.Config.schemeReg, repo, dig.String(), dig, td, apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, tdName, stateAPIManifestDelete)
			r.TestSkip(res, err, tdName, stateAPIManifestDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIManifestDelete)
		// verify manifest delete finished immediately
		if err := r.APIRequire(stateAPIManifestDeleteAtomic); err != nil {
			r.TestSkip(res, err, tdName, stateAPIManifestDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.ManifestHeadReq(r.Config.schemeReg, repo, dig.String(), dig, td, apiSaveOutput(res.Output), apiExpectStatus(http.StatusNotFound)); err != nil {
			r.TestFail(res, err, tdName, stateAPIManifestDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIManifestDeleteAtomic)
		return nil
	})
}

// TestDeleteBlob deletes a blob by digest and verifies the delete was atomic
// (immediate HEAD of the digest returns 404).
func (r *runner) TestDeleteBlob(parent *results, tdName string, repo string, dig digest.Digest) error {
	return r.ChildRun("blob-delete", parent, func(r *runner, res *results) error {
		td := r.State.Data[tdName]
		if err := r.APIRequire(stateAPIBlobDelete); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobDelete, stateAPIBlobDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.BlobDelete(r.Config.schemeReg, repo, dig, td, apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobDelete)
			r.TestSkip(res, err, tdName, stateAPIBlobDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobDelete)
		// verify blob delete finished immediately
		if err := r.APIRequire(stateAPIBlobDeleteAtomic); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.BlobHeadReq(r.Config.schemeReg, repo, dig, td, apiSaveOutput(res.Output), apiExpectStatus(http.StatusNotFound)); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobDeleteAtomic)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobDeleteAtomic)
		return nil
	})
}

// TestEmpty groups the tests that run against a repository before any content
// is pushed: an empty tag listing and a referrers query.
func (r *runner) TestEmpty(parent *results, repo string) error {
	return r.ChildRun("empty", parent, func(r *runner, res *results) error {
		errs := []error{}
		if err := r.TestEmptyTagList(res, repo); err != nil {
			errs = append(errs, err)
		}
		if err := r.TestEmptyReferrers(res, repo); err != nil {
			errs = append(errs, err)
		}
		if len(errs) > 0 {
			return errors.Join(errs...)
		}
		return nil
	})
}

// TestEmptyReferrers queries the referrers API with a random subject digest;
// the listing call itself must succeed.
func (r *runner) TestEmptyReferrers(parent *results, repo string) error {
	return r.ChildRun("referrers", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIReferrers); err != nil {
			r.TestSkip(res, err, "", stateAPIReferrers)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		subj := digest.Canonical.FromString(rand.Text())
		_, err := r.API.ReferrersList(r.Config.schemeReg, repo, subj, apiSaveOutput(res.Output))
		if err != nil {
			r.TestFail(res, err, "", stateAPIReferrers)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, "", stateAPIReferrers)
		return nil
	})
}

// TestEmptyTagList requests the tag listing for the repository; only the
// request itself is validated here.
func (r *runner) TestEmptyTagList(parent *results, repo string) error {
	return r.ChildRun("tag list", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPITagList); err != nil {
			r.TestSkip(res, err, "", stateAPITagList)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if _, err := r.API.TagList(r.Config.schemeReg, repo, apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, "", stateAPITagList)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, "", stateAPITagList)
		return nil
	})
}

// TestHead sends HEAD requests for every pushed tag, manifest, and blob in the
// named test data, collecting all errors.
func (r *runner) TestHead(parent *results, tdName string, repo string) error {
	return r.ChildRun("head", parent, func(r *runner, res *results) error {
		errs := []error{}
		for tag, dig := range r.State.Data[tdName].tags {
			err := r.TestHeadManifestTag(res, tdName, repo, tag, dig, r.State.Data[tdName].pullOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to send head request for manifest by tag %s%.0w", tag, err))
			}
		}
		for i, dig := range r.State.Data[tdName].manOrder {
			err := r.TestHeadManifestDigest(res, tdName, repo, dig, r.State.Data[tdName].pullOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to send head request for manifest %d, digest %s%.0w", i, dig.String(), err))
			}
		}
		for dig := range r.State.Data[tdName].blobs {
			err := r.TestHeadBlob(res, tdName, repo, dig, r.State.Data[tdName].pullOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to send head request for blob %s%.0w", dig.String(), err))
			}
		}
		if len(errs) > 0 {
			return errors.Join(errs...)
		}
		return nil
	})
}

// TestHeadBlob sends a HEAD request for a blob digest; honors the
// "SkipPullTest" flag and optionally requires the digest response header.
func (r *runner) TestHeadBlob(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	flags := r.API.GetFlags(opts...)
	if flags["SkipPullTest"] {
		return nil
	}
	return r.ChildRun("blob-head", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobHead); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobHead)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts := []apiDoOpt{apiSaveOutput(res.Output)}
		if r.Config.APIs.Blobs.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.BlobHeadExists(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobHead)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobHead)
		return nil
	})
}

// TestHeadManifestDigest sends a HEAD request for a manifest by digest; honors
// the "SkipPullTest" flag.
func (r *runner) TestHeadManifestDigest(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	flags := r.API.GetFlags(opts...)
	if flags["SkipPullTest"] {
		return nil
	}
	td := r.State.Data[tdName]
	apis := []stateAPIType{}
	return r.ChildRun("manifest-head-by-digest", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIManifestHeadDigest); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPIManifestHeadDigest)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		apis = append(apis, stateAPIManifestHeadDigest)
		opts := []apiDoOpt{apiSaveOutput(res.Output)}
		if r.Config.APIs.Manifests.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.ManifestHeadExists(r.Config.schemeReg, repo, dig.String(), dig, td, opts...); err != nil {
			r.TestFail(res, err, tdName, apis...)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, apis...)
		return nil
	})
}

// TestHeadManifestTag sends a HEAD request for a manifest by tag; honors the
// "SkipPullTest" flag and optionally requires the digest response header.
func (r *runner) TestHeadManifestTag(parent *results, tdName string, repo string, tag string, dig digest.Digest, opts ...apiDoOpt) error {
	flags := r.API.GetFlags(opts...)
	if flags["SkipPullTest"] {
		return nil
	}
	td := r.State.Data[tdName]
	apis := []stateAPIType{}
	return r.ChildRun("manifest-head-by-tag", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIManifestHeadTag); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPIManifestHeadTag)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		apis = append(apis, stateAPIManifestHeadTag)
		opts := []apiDoOpt{apiSaveOutput(res.Output)}
		if r.Config.APIs.Manifests.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.ManifestHeadExists(r.Config.schemeReg, repo, tag, dig, td, opts...); err != nil {
			r.TestFail(res, err, tdName, apis...)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, apis...)
		return nil
	})
}

// TestList verifies the tag listing contains every pushed tag, and when at
// least two tags exist, exercises the "last" pagination parameter by checking
// that tags at or before "last" are excluded and later tags are included.
func (r *runner) TestList(parent *results, tdName string, repo string) error {
	return r.ChildRun("tag-list", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPITagList); err != nil {
			r.TestSkip(res, err, tdName, stateAPITagList)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		tagList, err := r.API.TagList(r.Config.schemeReg, repo, apiSaveOutput(res.Output))
		if err != nil {
			r.TestFail(res, err, tdName, stateAPITagList)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		errs := []error{}
		for tag := range r.State.Data[tdName].tags {
			if !slices.Contains(tagList.Tags, tag) {
				errs = append(errs, fmt.Errorf("missing tag %q from listing%.0w", tag, errAPITestFail))
			}
		}
		if len(tagList.Tags) >= 2 {
			sortedTags := slices.Clone(tagList.Tags)
			slices.Sort(sortedTags)
			end := len(sortedTags) / 2
			last := sortedTags[end-1]
			// test the last parameter
			partialList, err := r.API.TagList(r.Config.schemeReg, repo, apiSaveOutput(res.Output), apiWithURLParam("last", last))
			if err != nil {
				r.TestFail(res, err, tdName, stateAPITagList)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			for _, tag := range sortedTags[:end] {
				if slices.Contains(partialList.Tags, tag) {
					errs = append(errs, fmt.Errorf("tag %q returned when last set to %q%.0w", tag, last, errAPITestFail))
				}
			}
			for _, tag := range sortedTags[end:] {
				if !slices.Contains(partialList.Tags, tag) {
					errs = append(errs, fmt.Errorf("tag %q missing when last set to %q%.0w", tag, last, errAPITestFail))
				}
			}
		}
		if len(errs) > 0 {
			r.TestFail(res, errors.Join(errs...), tdName, stateAPITagList)
			return errors.Join(errs...)
		}
		r.TestPass(res, tdName, stateAPITagList)
		return nil
	})
}

// TestManifestErrors checks error handling on the manifest APIs: requests for
// missing manifests (by digest and by tag) and pushes/pulls using a malformed
// digest string.
func (r *runner) TestManifestErrors(parent *results, repo string) error {
	errs := []error{}
	err := r.ChildRun("missing-manifest", parent, func(r *runner, res *results) error {
		errs := []error{}
		err := r.ChildRun("by-digest", res, func(r *runner, res *results) error {
			if err := r.APIRequire(stateAPIManifestGetDigest); err != nil {
				r.TestSkip(res, err, "", stateAPIManifestGetDigest)
				return fmt.Errorf("%.0w%w", errAPITestSkip, err)
			}
			// a digest of random bytes should not exist in the registry
			b := make([]byte, 1024)
			_, err := rand.Read(b)
			if err != nil {
				return err
			}
			dig := digest.Canonical.FromBytes(b)
			if err := r.API.ManifestGetReq(r.Config.schemeReg, repo, dig.String(), dig, nil,
				apiExpectStatus(http.StatusNotFound), apiSaveOutput(res.Output)); err != nil {
				r.TestFail(res, err, "", stateAPIManifestGetDigest)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			r.TestPass(res, "", stateAPIManifestGetDigest)
			return nil
		})
		if err != nil {
			errs = append(errs, err)
		}
		err = r.ChildRun("by-tag", res, func(r *runner, res *results) error {
			if err := r.APIRequire(stateAPIManifestGetTag); err != nil {
				r.TestSkip(res, err, "", stateAPIManifestGetTag)
				return fmt.Errorf("%.0w%w", errAPITestSkip, err)
			}
			rnd := rand.Text()
			tag := fmt.Sprintf("missing-%.20s", strings.ToLower(rnd))
			if err := r.API.ManifestGetReq(r.Config.schemeReg, repo, tag, digest.Digest(""), nil,
				apiExpectStatus(http.StatusNotFound), apiSaveOutput(res.Output)); err != nil {
				r.TestFail(res, err, "", stateAPIManifestGetTag)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			r.TestPass(res, "", stateAPIManifestGetTag)
			return nil
		})
		if err != nil {
			errs = append(errs, err)
		}
		return errors.Join(errs...)
	})
	if err != nil {
		errs = append(errs, err)
	}
	err = r.ChildRun("invalid-digest-format", parent, func(r *runner, res *results) error {
		errs := []error{}
		tdName := "invalid-manifest-digest"
		r.State.Data[tdName] = newTestData("Invalid Manifest Digest")
		manDig, err := r.State.Data[tdName].genManifestFull(genWithLayerCount(1))
		if err != nil {
			return err
		}
		for dig := range r.State.Data[tdName].blobs {
			err := r.TestPushBlobAny(res, tdName, repo, dig)
			if err != nil {
				errs = append(errs, err)
			}
		}
		// push digest "sha256:baddigeststring"
		err = r.ChildRun("manifest-put", res, func(r *runner, res *results) error {
			if err := r.APIRequire(stateAPIManifestPutDigest); err != nil {
				r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
				r.TestSkip(res, err, tdName, stateAPIManifestPutDigest)
				return fmt.Errorf("%.0w%w", errAPITestSkip, err)
			}
			if err := r.API.ManifestPut(r.Config.schemeReg, repo, "sha256:baddigeststring", manDig, r.State.Data[tdName], r.Config.APIs.Referrer, nil,
				apiWithFlag("ExpectBadDigest"), apiSaveOutput(res.Output)); err != nil {
				r.TestFail(res, err, tdName, stateAPIManifestPutDigest)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			r.TestPass(res, tdName, stateAPIManifestPutDigest)
			return nil
		})
		if err != nil {
			errs = append(errs, err)
		}
		// pull digest "sha256:baddigeststring"
		err = r.ChildRun("manifest-get", res, func(r *runner, res *results) error {
			if err := r.APIRequire(stateAPIManifestGetDigest); err != nil {
				r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
				r.TestSkip(res, err, tdName, stateAPIManifestGetDigest)
				return fmt.Errorf("%.0w%w", errAPITestSkip, err)
			}
			if err := r.API.ManifestGetReq(r.Config.schemeReg, repo, "sha256:baddigeststring", manDig, r.State.Data[tdName],
				apiExpectStatus(http.StatusNotFound, http.StatusBadRequest), apiSaveOutput(res.Output)); err != nil {
				r.TestFail(res, err, tdName,
					stateAPIManifestGetDigest)
				return fmt.Errorf("%.0w%w", errAPITestFail, err)
			}
			r.TestPass(res, tdName, stateAPIManifestGetDigest)
			return nil
		})
		if err != nil {
			errs = append(errs, err)
		}
		// cleanup
		err = r.TestDelete(res, tdName, repo)
		if err != nil {
			errs = append(errs, err)
		}
		return errors.Join(errs...)
	})
	if err != nil {
		errs = append(errs, err)
	}

	return errors.Join(errs...)
}

// TestPing checks the registry's base API endpoint.
func (r *runner) TestPing(parent *results) error {
	return r.ChildRun("ping", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIPing); err != nil {
			r.TestSkip(res, err, "", stateAPIPing)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.PingReq(r.Config.schemeReg, apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, "", stateAPIPing)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, "", stateAPIPing)
		return nil
	})
}

// TestPull pulls every pushed tag, manifest, and blob in the named test data,
// collecting all errors.
func (r *runner) TestPull(parent *results, tdName string, repo string) error {
	return r.ChildRun("pull", parent, func(r *runner, res *results) error {
		errs := []error{}
		for tag, dig := range r.State.Data[tdName].tags {
			err := r.TestPullManifestTag(res, tdName, repo, tag, dig, r.State.Data[tdName].pullOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to pull manifest by tag %s%.0w", tag, err))
			}
		}
		for i, dig := range r.State.Data[tdName].manOrder {
			err := r.TestPullManifestDigest(res, tdName, repo, dig, r.State.Data[tdName].pullOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to pull manifest %d, digest %s%.0w", i, dig.String(), err))
			}
		}
		for dig := range r.State.Data[tdName].blobs {
			err := r.TestPullBlob(res, tdName, repo, dig, r.State.Data[tdName].pullOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to pull blob %s%.0w", dig.String(), err))
			}
		}
		if len(errs) > 0 {
			return errors.Join(errs...)
		}
		return nil
	})
}

// TestPullBlob pulls a full blob by digest; honors the "SkipPullTest" flag and
// optionally requires the digest response header.
func (r *runner) TestPullBlob(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	flags := r.API.GetFlags(opts...)
	if flags["SkipPullTest"] {
		return nil
	}
	return r.ChildRun("blob-get", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobGetFull); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobGetFull)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Blobs.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.BlobGetExistsFull(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobGetFull)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobGetFull)
		return nil
	})
}

// TestPullManifestDigest pulls a manifest by digest; honors the
// "SkipPullTest" flag.
func (r *runner) TestPullManifestDigest(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	flags := r.API.GetFlags(opts...)
	if flags["SkipPullTest"] {
		return nil
	}
	td := r.State.Data[tdName]
	apis := []stateAPIType{}
	return r.ChildRun("manifest-by-digest", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIManifestGetDigest); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPIManifestGetDigest)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		apis = append(apis, stateAPIManifestGetDigest)
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Manifests.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.ManifestGetExists(r.Config.schemeReg, repo, dig.String(), dig, td, opts...); err != nil {
			r.TestFail(res, err, tdName, apis...)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, apis...)
		return nil
	})
}

// TestPullManifestTag pulls a manifest by tag; honors the "SkipPullTest" flag.
func (r *runner) TestPullManifestTag(parent *results, tdName string, repo string, tag string, dig digest.Digest, opts ...apiDoOpt) error {
	flags := r.API.GetFlags(opts...)
	if flags["SkipPullTest"] {
		return nil
	}
	td := r.State.Data[tdName]
	apis := []stateAPIType{}
	return r.ChildRun("manifest-by-tag", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIManifestGetTag); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPIManifestGetTag)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		apis = append(apis, stateAPIManifestGetTag)
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Manifests.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.ManifestGetExists(r.Config.schemeReg, repo, tag, dig, td, opts...); err != nil {
			r.TestFail(res, err, tdName, apis...)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, apis...)
		return nil
	})
}

// TestPush pushes the named test data: blobs first, then manifests in push
// order, then tags, collecting all errors.
func (r *runner) TestPush(parent *results, tdName string, repo string) error {
	return r.ChildRun("push", parent, func(r *runner, res *results) error {
		errs := []error{}
		for dig := range r.State.Data[tdName].blobs {
			err := r.TestPushBlobAny(res, tdName, repo, dig, r.State.Data[tdName].pushOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to push blob %s%.0w", dig.String(), err))
			}
		}
		for i, dig := range r.State.Data[tdName].manOrder {
			err := r.TestPushManifestDigest(res, tdName, repo, dig, r.State.Data[tdName].pushOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to push manifest %d, digest %s%.0w", i, dig.String(), err))
			}
		}
		for tag, dig := range r.State.Data[tdName].tags {
			err := r.TestPushManifestTag(res, tdName, repo, tag, dig, r.State.Data[tdName].pushOpts[dig]...)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to push manifest tag %s%.0w", tag, err))
			}
		}
		if len(errs) > 0 {
			return errors.Join(errs...)
		}
		return nil
	})
}

// Package-level tracking of which blob push APIs have been exercised, overall
// and per digest algorithm, used by TestPushBlobAny to prioritize untested
// APIs.
// NOTE(review): mutable package-level state; appears to assume tests run
// sequentially — confirm.
var (
	blobAPIs             = []stateAPIType{stateAPIBlobPostPut, stateAPIBlobPostOnly, stateAPIBlobPatchStream, stateAPIBlobPatchChunked}
	blobAPIsTested       = [stateAPIMax]bool{}
	blobAPIsTestedByAlgo = map[digest.Algorithm]*[stateAPIMax]bool{}
)

// TestPushBlobAny pushes a blob using one of the blob push APIs, ordering the
// attempts to prefer APIs not yet tested (globally, then per algorithm), then
// APIs known to pass, then the remaining preferred order. Returns nil on the
// first API that succeeds, otherwise the joined errors.
func (r *runner) TestPushBlobAny(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	if err := r.APIRequire(stateAPIBlobPush); err != nil {
		return fmt.Errorf("%.0w%w", errAPITestSkip, err)
	}
	apis := []stateAPIType{}
	if _, ok := blobAPIsTestedByAlgo[dig.Algorithm()]; !ok {
		blobAPIsTestedByAlgo[dig.Algorithm()] = &[stateAPIMax]bool{}
	}
	// first try untested APIs
	for _, api := range blobAPIs {
		if !blobAPIsTested[api] {
			apis = append(apis, api)
		}
	}
	// then untested with a given algorithm
	for _, api := range blobAPIs {
		if !blobAPIsTestedByAlgo[dig.Algorithm()][api] && !slices.Contains(apis, api) {
			apis = append(apis, api)
		}
	}
	// next use APIs that are known successful
	for _, api := range blobAPIs {
		if r.State.APIStatus[api] == statusPass && !slices.Contains(apis, api) {
			apis = append(apis, api)
		}
	}
	// lastly use APIs in preferred order
	for _, api := range blobAPIs {
		if !slices.Contains(apis, api) {
			apis = append(apis, api)
		}
	}
	// return on the first successful API
	errs := []error{}
	for _, api := range apis {
		var err error
		switch api {
		case stateAPIBlobPostPut:
			err = r.TestPushBlobPostPut(parent, tdName, repo, dig, opts...)
		case stateAPIBlobPostOnly:
			err = r.TestPushBlobPostOnly(parent, tdName, repo, dig, opts...)
		case stateAPIBlobPatchStream:
			err = r.TestPushBlobPatchStream(parent, tdName, repo, dig, opts...)
		case stateAPIBlobPatchChunked:
			err = r.TestPushBlobPatchChunked(parent, tdName, repo, dig, opts...)
		default:
			err = fmt.Errorf("blob API %s is not handled by TestPushBlobAny", api.String())
		}
		blobAPIsTested[api] = true
		blobAPIsTestedByAlgo[dig.Algorithm()][api] = true
		if err == nil {
			return nil
		}
		errs = append(errs, err)
	}
	return errors.Join(errs...)
}

// TestPushBlobMount mounts a blob from repo2 into repo using the mount API.
func (r *runner) TestPushBlobMount(parent *results, tdName string, repo, repo2 string, dig digest.Digest) error {
	return r.ChildRun("blob-mount", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobMountSource); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobMountSource)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.BlobMount(r.Config.schemeReg, repo, repo2, dig, r.State.Data[tdName], apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobMountSource)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobMountSource)
		return nil
	})
}

// TestPushBlobMountAnonymous mounts a blob without specifying a source
// repository (empty repo2 argument to BlobMount).
func (r *runner) TestPushBlobMountAnonymous(parent *results, tdName string, repo string, dig digest.Digest) error {
	return r.ChildRun("blob-mount-anonymous", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobMountAnonymous); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobMountAnonymous)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.BlobMount(r.Config.schemeReg, repo, "", dig, r.State.Data[tdName], apiSaveOutput(res.Output)); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobMountAnonymous)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobMountAnonymous)
		return nil
	})
}

// TestPushBlobMountMissing attempts to mount a blob that does not exist in
// the source repository; the mount must NOT succeed (errRegUnsupported is the
// expected outcome).
func (r *runner) TestPushBlobMountMissing(parent *results, tdName string, repo, repo2 string, dig digest.Digest) error {
	return r.ChildRun("blob-mount", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobMountSource); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobMountSource)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		if err := r.API.BlobMount(r.Config.schemeReg, repo, repo2, dig, r.State.Data[tdName], apiSaveOutput(res.Output)); !errors.Is(err, errRegUnsupported) {
			if err == nil {
				err = fmt.Errorf("blob mount of missing blob incorrectly succeeded")
			}
			r.TestFail(res, err, tdName, stateAPIBlobMountSource)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobMountSource)
		return nil
	})
}

// TestPushBlobPostCancel starts a blob upload with POST and then cancels it.
func (r *runner) TestPushBlobPostCancel(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	return r.ChildRun("blob-post-cancel", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobPush, stateAPIBlobCancel); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobPush, stateAPIBlobCancel)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if err := r.API.BlobPostCancel(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobPush, stateAPIBlobCancel)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobPush, stateAPIBlobCancel)
		return nil
	})
}

// TestPushBlobPostPut pushes a blob with the two-step POST then PUT upload.
func (r *runner) TestPushBlobPostPut(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	return r.ChildRun("blob-post-put", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobPostPut); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobPostPut)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Blobs.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.BlobPostPut(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobPostPut)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobPostPut, stateAPIBlobPush)
		return nil
	})
}

// TestPushBlobPostOnly pushes a blob with the single-step (monolithic) POST.
func (r *runner) TestPushBlobPostOnly(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	return r.ChildRun("blob-post-only", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobPostOnly); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobPostOnly)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Blobs.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.BlobPostOnly(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobPostOnly)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobPostOnly, stateAPIBlobPush)
		return nil
	})
}

// TestPushBlobPatchChunked pushes a blob with chunked PATCH requests.
func (r *runner) TestPushBlobPatchChunked(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	return r.ChildRun("blob-patch-chunked", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobPatchChunked); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobPatchChunked)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Blobs.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.BlobPatchChunked(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobPatchChunked)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobPatchChunked, stateAPIBlobPush)
		return nil
	})
}

// TestPushBlobPatchStream pushes a blob with a streamed PATCH upload.
func (r *runner) TestPushBlobPatchStream(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	return r.ChildRun("blob-patch-stream", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIBlobPatchStream); err != nil {
			r.TestSkip(res, err, tdName, stateAPIBlobPatchStream)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Blobs.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.BlobPatchStream(r.Config.schemeReg, repo, dig, r.State.Data[tdName], opts...); err != nil {
			r.TestFail(res, err, tdName, stateAPIBlobPatchStream)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		r.TestPass(res, tdName, stateAPIBlobPatchStream, stateAPIBlobPush)
		return nil
	})
}

// TestPushManifestDigest pushes a manifest by digest. When the "TagParam" flag
// is set, the tags mapped to this digest are sent as "tag" URL parameters and
// the response's OCI-Tag header is checked for each expected tag; tags
// confirmed by the header are marked pushed in the test data.
func (r *runner) TestPushManifestDigest(parent *results, tdName string, repo string, dig digest.Digest, opts ...apiDoOpt) error {
	return r.ChildRun("manifest-by-digest", parent, func(r *runner, res *results) error {
		td := r.State.Data[tdName]
		apis := []stateAPIType{stateAPIManifestPutDigest}
		resp := http.Response{Header: http.Header{}}
		putOpts := []apiDoOpt{apiReturnResponse(&resp)}
		subj := detectSubject(td.manifests[dig])
		if subj != nil {
			apis = append(apis, stateAPIManifestPutSubject)
		}
		flags := r.API.GetFlags(opts...)
		expectTags := []string{}
		if flags["TagParam"] {
			apis = append(apis, stateAPIManifestPutTagParam)
			for tag, d := range td.tags {
				if d == dig {
					putOpts = append(putOpts, apiWithURLParam("tag", tag))
					expectTags = append(expectTags, tag)
				}
			}
		}
		for _, api := range apis {
			if err := r.APIRequire(api); err != nil {
				r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
				r.TestSkip(res, err, tdName, api)
				return fmt.Errorf("%.0w%w", errAPITestSkip, err)
			}
		}
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Manifests.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.ManifestPut(r.Config.schemeReg, repo, dig.String(), dig, td, r.Config.APIs.Referrer, putOpts, opts...); err != nil {
			r.TestFail(res, err, tdName, apis...)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		if len(expectTags) > 0 {
			errs := []error{}
			for _, tag := range expectTags {
				// OCI-Tag may be a repeated header or a comma separated list
				if slices.ContainsFunc(resp.Header.Values("OCI-Tag"), func(v string) bool {
					return slices.ContainsFunc(strings.Split(v, ","), func(s string) bool {
						return strings.TrimSpace(s) == tag
					})
				}) {
					td.tagPushed[tag] = true
				} else {
					errs = append(errs, fmt.Errorf("header missing OCI-Tag: %s%.0w", tag, errRegUnsupported))
				}
			}
			if len(errs) > 0 {
				r.TestFail(res, errors.Join(errs...), tdName, stateAPIManifestPutTagParam)
				return fmt.Errorf("%.0w%w", errAPITestFail, errors.Join(errs...))
			}
		}
		r.TestPass(res, tdName, apis...)
		return nil
	})
}

// TestPushManifestTag pushes a manifest by tag, skipping tags already pushed
// (e.g. via a tag parameter in the digest push).
func (r *runner) TestPushManifestTag(parent *results, tdName string, repo string, tag string, dig digest.Digest, opts ...apiDoOpt) error {
	td := r.State.Data[tdName]
	if td.tagPushed[tag] {
		return nil // tag already pushed (likely as a tag parameter in the digest push)
	}
	apis := []stateAPIType{}
	subj := detectSubject(td.manifests[dig])
	if subj != nil {
		apis = append(apis, stateAPIManifestPutSubject)
	}
	return r.ChildRun("manifest-by-tag", parent, func(r *runner, res *results) error {
		if err := r.APIRequire(stateAPIManifestPutTag); err != nil {
			r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip)
			r.TestSkip(res, err, tdName, stateAPIManifestPutTag)
			return fmt.Errorf("%.0w%w", errAPITestSkip, err)
		}
		apis = append(apis, stateAPIManifestPutTag)
		opts = append(opts, apiSaveOutput(res.Output))
		if r.Config.APIs.Manifests.DigestHeader {
			opts = append(opts, apiWithFlag("RequireDigestHeader"))
		}
		if err := r.API.ManifestPut(r.Config.schemeReg, repo, tag, dig, td, r.Config.APIs.Referrer, nil, opts...); err != nil {
			r.TestFail(res, err, tdName, apis...)
			return fmt.Errorf("%.0w%w", errAPITestFail, err)
		}
		td.tagPushed[tag] = true
		r.TestPass(res, tdName, apis...)
+ return nil + }) +} + +func (r *runner) TestReferrers(parent *results, tdName string, repo string) error { + if len(r.State.Data[tdName].referrers) == 0 { + return nil + } + return r.ChildRun("referrers", parent, func(r *runner, res *results) error { + errs := []error{} + for subj, referrerGoal := range r.State.Data[tdName].referrers { + if err := r.APIRequire(stateAPIReferrers); err != nil { + r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusSkip) + r.TestSkip(res, err, tdName, stateAPIReferrers) + return fmt.Errorf("%.0w%w", errAPITestSkip, err) + } + referrerResp, err := r.API.ReferrersList(r.Config.schemeReg, repo, subj, apiSaveOutput(res.Output)) + if err != nil { + errs = append(errs, err) + } + if err == nil { + for _, goal := range referrerGoal { + if !slices.ContainsFunc(referrerResp.Manifests, func(resp image.Descriptor) bool { + return resp.Digest == goal.Digest && + resp.MediaType == goal.MediaType && + resp.Size == goal.Size && + resp.ArtifactType == goal.ArtifactType && + mapContainsAll(resp.Annotations, goal.Annotations) + }) { + errs = append(errs, fmt.Errorf("entry missing from referrers list, subject %s, referrer %+v%.0w", subj, goal, errAPITestFail)) + } + } + } + referrerATs := map[string]bool{} + for _, goal := range referrerGoal { + referrerATs[goal.ArtifactType] = true + } + // search for referrers filtered by artifactType + for referrerAT := range referrerATs { + var filtersHeader string + referrerResp, err := r.API.ReferrersList(r.Config.schemeReg, repo, subj, apiSaveOutput(res.Output), + apiWithURLParam("artifactType", referrerAT), + apiReturnHeader("OCI-Filters-Applied", &filtersHeader)) + if err != nil { + errs = append(errs, err) + } + if err == nil { + filtersApplied := false + for filter := range strings.SplitSeq(filtersHeader, ",") { + if strings.TrimSpace(filter) == "artifactType" { + filtersApplied = true + break + } + } + if !filtersApplied { + errs = append(errs, fmt.Errorf("registry does not set the expected 
OCI-Filters-Applied header for the artifactType%.0w", errRegUnsupported)) + } + for _, goal := range referrerGoal { + if (!filtersApplied || goal.ArtifactType == referrerAT) && !slices.ContainsFunc(referrerResp.Manifests, func(resp image.Descriptor) bool { + return resp.Digest == goal.Digest && + resp.MediaType == goal.MediaType && + resp.Size == goal.Size && + resp.ArtifactType == goal.ArtifactType && + mapContainsAll(resp.Annotations, goal.Annotations) + }) { + errs = append(errs, fmt.Errorf("entry missing from referrers list, subject %s, referrer %+v%.0w", subj, goal, errAPITestFail)) + } + } + if filtersApplied { + // verify no other entries are returned + for _, resp := range referrerResp.Manifests { + if resp.ArtifactType != referrerAT { + errs = append(errs, fmt.Errorf("referrers filter for artifactType %s included descriptor %v%.0w", referrerAT, resp, errAPITestError)) + } + } + } + } + } + } + if len(errs) > 0 { + r.TestFail(res, errors.Join(errs...), tdName, stateAPIReferrers) + return fmt.Errorf("%.0w%w", errAPITestFail, errors.Join(errs...)) + } + r.TestPass(res, tdName, stateAPIReferrers) + return nil + }) +} + +func mapContainsAll[K comparable, V comparable](check, goal map[K]V) bool { + if len(goal) == 0 { + return true + } + for k, v := range goal { + if found, ok := check[k]; !ok || found != v { + return false + } + } + return true +} + +func (r *runner) ChildRun(name string, parent *results, fn func(*runner, *results) error) error { + res := resultsNew(name, parent) + // HasPrefix goes both ways, to include all parents to the prefix, and then all children of the selected prefix + if r.Config.FilterTest != "" && !strings.HasPrefix(r.Config.FilterTest, res.Name) && !strings.HasPrefix(res.Name, r.Config.FilterTest) { + return fmt.Errorf("test filter %s excludes %s%.0w", r.Config.FilterTest, res.Name, errAPITestDisabled) + } + if parent != nil { + parent.Children = append(parent.Children, res) + } + err := fn(r, res) + res.Stop = time.Now() + if err 
!= nil && !errors.Is(err, errAPITestFail) && !errors.Is(err, errAPITestSkip) && !errors.Is(err, errAPITestDisabled) { + res.Errs = append(res.Errs, err) + res.Status = res.Status.Set(statusError) + res.Counts[statusError]++ + } + if parent != nil { + for i := range statusMax { + parent.Counts[i] += res.Counts[i] + } + parent.Status = parent.Status.Set(res.Status) + } + return err +} + +func (r *runner) TestSkip(res *results, err error, tdName string, apis ...stateAPIType) { + s := statusSkip + if errors.Is(err, errAPITestError) { + s = statusError + } else if errors.Is(err, errAPITestFail) { + s = statusFail + } else if errors.Is(err, errAPITestDisabled) { + s = statusDisabled + } + res.Status = res.Status.Set(s) + res.Counts[s]++ + if tdName != "" { + r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(s) + } + for _, a := range apis { + r.State.APIStatus[a] = r.State.APIStatus[a].Set(s) + } + fmt.Fprintf(res.Output, "%s: skipping test:\n %s\n", res.Name, + strings.ReplaceAll(err.Error(), "\n", "\n ")) + r.Log.Info("skipping test", "name", res.Name, "error", err.Error()) +} + +func (r *runner) TestFail(res *results, err error, tdName string, apis ...stateAPIType) { + s := statusFail + if errors.Is(err, errAPITestError) { + s = statusError + } else if errors.Is(err, errAPITestDisabled) { + s = statusDisabled + } else if errors.Is(err, errRegUnsupported) { + s = statusSkip + } + res.Status = res.Status.Set(s) + res.Counts[s]++ + res.Errs = append(res.Errs, err) + if tdName != "" { + r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(s) + } + for _, a := range apis { + r.State.APIStatus[a] = r.State.APIStatus[a].Set(s) + } + if s == statusFail { + r.Log.Warn("failed test", "name", res.Name, "error", err.Error()) + r.Log.Debug("failed test output", "name", res.Name, "output", res.Output.String()) + } +} + +func (r *runner) TestPass(res *results, tdName string, apis ...stateAPIType) { + res.Status = res.Status.Set(statusPass) + res.Counts[statusPass]++ 
+ if tdName != "" { + r.State.DataStatus[tdName] = r.State.DataStatus[tdName].Set(statusPass) + } + for _, a := range apis { + r.State.APIStatus[a] = r.State.APIStatus[a].Set(statusPass) + } + r.Log.Info("passing test", "name", res.Name) + r.Log.Debug("passing test output", "name", res.Name, "output", res.Output.String()) +} + +func (r *runner) APIRequire(apis ...stateAPIType) error { + errs := []error{} + for _, a := range apis { + aText, err := a.MarshalText() + if err != nil { + errs = append(errs, fmt.Errorf("unknown api %d", a)) + continue + } + // check the configuration disables the api + configDisabled := false + switch a { + case stateAPIPing: + if !r.Config.APIs.Ping { + configDisabled = true + } + case stateAPITagList: + if !r.Config.APIs.Tags.List { + configDisabled = true + } + case stateAPIManifestHeadTag, stateAPIManifestHeadDigest, stateAPIManifestGetTag, stateAPIManifestGetDigest, + stateAPIBlobHead, stateAPIBlobGetFull, stateAPIBlobGetRange: + if !r.Config.APIs.Pull { + configDisabled = true + } + case stateAPIManifestPutTag, stateAPIManifestPutDigest, stateAPIManifestPutSubject, + stateAPIBlobPush, stateAPIBlobPostOnly, stateAPIBlobPostPut, + stateAPIBlobPatchChunked, stateAPIBlobPatchStream, stateAPIBlobMountSource: + if !r.Config.APIs.Push { + configDisabled = true + } + case stateAPIManifestPutTagParam: + if !r.Config.APIs.Push || !r.Config.APIs.Manifests.TagParam { + configDisabled = true + } + case stateAPIBlobCancel: + if !r.Config.APIs.Blobs.UploadCancel { + configDisabled = true + } + case stateAPIBlobMountAnonymous: + if !r.Config.APIs.Push || !r.Config.APIs.Blobs.MountAnonymous { + configDisabled = true + } + case stateAPITagDelete: + if !r.Config.APIs.Tags.Delete { + configDisabled = true + } + case stateAPITagDeleteAtomic: + if !r.Config.APIs.Tags.Delete || !r.Config.APIs.Tags.Atomic { + configDisabled = true + } + case stateAPIManifestDelete: + if !r.Config.APIs.Manifests.Delete { + configDisabled = true + } + case 
stateAPIManifestDeleteAtomic: + if !r.Config.APIs.Manifests.Atomic { + configDisabled = true + } + case stateAPIBlobDelete: + if !r.Config.APIs.Blobs.Delete { + configDisabled = true + } + case stateAPIBlobDeleteAtomic: + if !r.Config.APIs.Blobs.Atomic { + configDisabled = true + } + case stateAPIReferrers: + if !r.Config.APIs.Referrer { + configDisabled = true + } + default: + return fmt.Errorf("APIRequire check is missing for state %s%.0w", a.String(), errAPITestError) + } + if configDisabled { + errs = append(errs, fmt.Errorf("api %s is disabled in the configuration%.0w", aText, errAPITestDisabled)) + } + // do not check the [r.global.apiState] since tests may pass or fail based on different input data + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} diff --git a/conformance/setup.go b/conformance/setup.go deleted file mode 100644 index b9666a87..00000000 --- a/conformance/setup.go +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright contributors to the Open Containers Distribution Specification -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package conformance - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "log" - "math/big" - mathrand "math/rand" - "os" - "path/filepath" - "runtime" - "strconv" - - "github.com/bloodorangeio/reggie" - "github.com/google/uuid" - g "github.com/onsi/ginkgo/v2" - "github.com/onsi/ginkgo/v2/formatter" - godigest "github.com/opencontainers/go-digest" -) - -type ( - TagList struct { - Name string `json:"name"` - Tags []string `json:"tags"` - } - - TestBlob struct { - Content []byte - ContentLength string - Digest string - } -) - -const ( - pull = 1 << iota - push - contentDiscovery - contentManagement - numWorkflows - - BLOB_UNKNOWN = iota - BLOB_UPLOAD_INVALID - BLOB_UPLOAD_UNKNOWN - DIGEST_INVALID - MANIFEST_BLOB_UNKNOWN - MANIFEST_INVALID - MANIFEST_UNKNOWN - MANIFEST_UNVERIFIED - NAME_INVALID - NAME_UNKNOWN - SIZE_INVALID - TAG_INVALID - UNAUTHORIZED - DENIED - UNSUPPORTED - - envVarRootURL = "OCI_ROOT_URL" - envVarNamespace = "OCI_NAMESPACE" - envVarUsername = "OCI_USERNAME" - envVarPassword = "OCI_PASSWORD" - envVarDebug = "OCI_DEBUG" - envVarPull = "OCI_TEST_PULL" - envVarPush = "OCI_TEST_PUSH" - envVarContentDiscovery = "OCI_TEST_CONTENT_DISCOVERY" - envVarContentManagement = "OCI_TEST_CONTENT_MANAGEMENT" - envVarPushEmptyLayer = "OCI_SKIP_EMPTY_LAYER_PUSH_TEST" - envVarBlobDigest = "OCI_BLOB_DIGEST" - envVarManifestDigest = "OCI_MANIFEST_DIGEST" - envVarTagName = "OCI_TAG_NAME" - envVarTagList = "OCI_TAG_LIST" - envVarHideSkippedWorkflows = "OCI_HIDE_SKIPPED_WORKFLOWS" - envVarAuthScope = "OCI_AUTH_SCOPE" - envVarDeleteManifestBeforeBlobs = "OCI_DELETE_MANIFEST_BEFORE_BLOBS" - envVarCrossmountNamespace = "OCI_CROSSMOUNT_NAMESPACE" - envVarAutomaticCrossmount = "OCI_AUTOMATIC_CROSSMOUNT" - envVarReportDir = "OCI_REPORT_DIR" - - emptyLayerTestTag = "emptylayer" - testTagName = "tagtest0" - - titlePull = "Pull" - titlePush = "Push" - titleContentDiscovery = "Content Discovery" - titleContentManagement = "Content Management" - - 
// layerBase64String is a base64 encoding of a simple tarball, obtained like this: - // $ echo 'you bothered to find out what was in here. Congratulations!' > test.txt - // $ tar czvf test.tar.gz test.txt - // $ cat test.tar.gz | base64 - layerBase64String = "H4sIAAAAAAAAA+3OQQrCMBCF4a49xXgBSUnaHMCTRBptQRNpp6i3t0UEV7oqIv7fYgbmzeJpHHSjVy0" + - "WZCa1c/MufWVe94N3RWlrZ72x3k/30nhbFWKWLPU0Dhp6keJ8im//PuU/6pZH2WVtYx8b0Sz7LjWSR5VLG6YRBumSzOlGtjkd+qD" + - "jMWiX07Befbs7AAAAAAAAAAAAAAAAAPyzO34MnqoAKAAA" - - // filter types - artifactTypeFilter = "artifactType" -) - -var ( - testMap = map[string]int{ - envVarPull: pull, - envVarPush: push, - envVarContentDiscovery: contentDiscovery, - envVarContentManagement: contentManagement, - } - - testBlobA []byte - testBlobALength string - testBlobADigest string - testRefBlobA []byte - testRefBlobALength string - testRefBlobADigest string - testRefArtifactTypeA string - testRefArtifactTypeB string - testRefArtifactTypeIndex string - testRefBlobB []byte - testRefBlobBLength string - testRefBlobBDigest string - testBlobB []byte - testBlobBDigest string - testBlobBChunk1 []byte - testBlobBChunk1Length string - testBlobBChunk2 []byte - testBlobBChunk2Length string - testBlobBChunk1Range string - testBlobBChunk2Range string - testAnnotationKey string - testAnnotationValues map[string]string - client *reggie.Client - crossmountNamespace string - dummyDigest string - errorCodes []string - invalidManifestContent []byte - layerBlobData []byte - layerBlobDigest string - layerBlobContentLength string - emptyLayerManifestContent []byte - emptyLayerManifestDigest string - nonexistentManifest string - emptyJSONBlob []byte - emptyJSONDescriptor descriptor - refsManifestAConfigArtifactContent []byte - refsManifestAConfigArtifactDigest string - refsManifestALayerArtifactContent []byte - refsManifestALayerArtifactDigest string - refsManifestBConfigArtifactContent []byte - refsManifestBConfigArtifactDigest string - refsManifestBLayerArtifactContent 
[]byte - refsManifestBLayerArtifactDigest string - refsManifestCLayerArtifactContent []byte - refsManifestCLayerArtifactDigest string - refsIndexArtifactContent []byte - refsIndexArtifactDigest string - reportJUnitFilename string - reportHTMLFilename string - httpWriter *httpDebugWriter - testsToRun int - suiteDescription string - runPullSetup bool - runPushSetup bool - runContentDiscoverySetup bool - runContentManagementSetup bool - deleteManifestBeforeBlobs bool - runAutomaticCrossmountTest bool - automaticCrossmountEnabled bool - configs []TestBlob - manifests []TestBlob - seed int64 - Version = "unknown" -) - -func init() { - var err error - - seed = g.GinkgoRandomSeed() - hostname := os.Getenv(envVarRootURL) - namespace := os.Getenv(envVarNamespace) - username := os.Getenv(envVarUsername) - password := os.Getenv(envVarPassword) - authScope := os.Getenv(envVarAuthScope) - crossmountNamespace = os.Getenv(envVarCrossmountNamespace) - if len(crossmountNamespace) == 0 { - crossmountNamespace = fmt.Sprintf("conformance-%s", uuid.New()) - } - - debug, _ := strconv.ParseBool(os.Getenv(envVarDebug)) - - for envVar, enableTest := range testMap { - if varIsTrue, _ := strconv.ParseBool(os.Getenv(envVar)); varIsTrue { - testsToRun |= enableTest - } - } - - httpWriter = newHTTPDebugWriter(debug) - logger := newHTTPDebugLogger(httpWriter) - client, err = reggie.NewClient(hostname, - reggie.WithDefaultName(namespace), - reggie.WithUsernamePassword(username, password), - reggie.WithDebug(true), - reggie.WithUserAgent("distribution-spec-conformance-tests"), - reggie.WithAuthScope(authScope), - reggie.WithInsecureSkipTLSVerify(true)) - if err != nil { - panic(err) - } - - client.SetLogger(logger) - client.SetCookieJar(nil) - - // create a unique config for each workflow category - for i := 0; i < numWorkflows; i++ { - - // in order to get a unique blob digest, we create a new author - // field for the config on each run. 
- randomAuthor := randomString(16) - config := image{ - Architecture: "amd64", - OS: "linux", - RootFS: rootFS{ - Type: "layers", - DiffIDs: []godigest.Digest{}, - }, - Author: randomAuthor, - } - configBlobContent, err := json.MarshalIndent(&config, "", "\t") - if err != nil { - log.Fatal(err) - } - - configBlobContentLength := strconv.Itoa(len(configBlobContent)) - configBlobDigestRaw := godigest.FromBytes(configBlobContent) - configBlobDigest := configBlobDigestRaw.String() - if v := os.Getenv(envVarBlobDigest); v != "" { - configBlobDigest = v - } - - configs = append(configs, TestBlob{ - Content: configBlobContent, - ContentLength: configBlobContentLength, - Digest: configBlobDigest, - }) - } - - layerBlobData, err = base64.StdEncoding.DecodeString(layerBase64String) - if err != nil { - log.Fatal(err) - } - - layerBlobDigestRaw := godigest.FromBytes(layerBlobData) - layerBlobDigest = layerBlobDigestRaw.String() - layerBlobContentLength = fmt.Sprintf("%d", len(layerBlobData)) - - layers := []descriptor{{ - MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", - Size: int64(len(layerBlobData)), - Digest: layerBlobDigestRaw, - }} - - // create a unique manifest for each workflow category - for i := 0; i < numWorkflows; i++ { - manifest := manifest{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.manifest.v1+json", - Config: descriptor{ - MediaType: "application/vnd.oci.image.config.v1+json", - Digest: godigest.Digest(configs[i].Digest), - Size: int64(len(configs[i].Content)), - Data: configs[i].Content, // must be the config content. - NewUnspecifiedField: []byte("hello world"), // content doesn't matter. 
- }, - Layers: layers, - } - - manifestContent, err := json.MarshalIndent(&manifest, "", "\t") - if err != nil { - log.Fatal(err) - } - - manifestContentLength := strconv.Itoa(len(manifestContent)) - manifestDigest := godigest.FromBytes(manifestContent).String() - if v := os.Getenv(envVarManifestDigest); v != "" { - manifestDigest = v - } - - manifests = append(manifests, TestBlob{ - Content: manifestContent, - ContentLength: manifestContentLength, - Digest: manifestDigest, - }) - } - - // used in push test - emptyLayerManifest := manifest{ - SchemaVersion: 2, - Config: descriptor{ - MediaType: "application/vnd.oci.image.config.v1+json", - Digest: godigest.Digest(configs[1].Digest), - Size: int64(len(configs[1].Content)), - Data: configs[1].Content, // must be the config content. - NewUnspecifiedField: []byte("hello world"), // content doesn't matter. - }, - Layers: []descriptor{}, - } - - emptyLayerManifestContent, err = json.MarshalIndent(&emptyLayerManifest, "", "\t") - if err != nil { - log.Fatal(err) - } - emptyLayerManifestDigest = string(godigest.FromBytes(emptyLayerManifestContent)) - - nonexistentManifest = ".INVALID_MANIFEST_NAME" - invalidManifestContent = []byte("blablabla") - - dig, blob := randomBlob(42, seed+1) - testBlobA = blob - testBlobALength = strconv.Itoa(len(testBlobA)) - testBlobADigest = dig.String() - - setupChunkedBlob(42) - - // used in referrers test (artifacts with Subject field set) - emptyJSONBlob = []byte("{}") - emptyJSONDescriptor = descriptor{ - MediaType: "application/vnd.oci.empty.v1+json", - Size: int64(len(emptyJSONBlob)), - Digest: godigest.FromBytes(emptyJSONBlob), - } - - testRefBlobA = []byte("NHL Peanut Butter on my NHL bagel") - testRefBlobALength = strconv.Itoa(len(testRefBlobA)) - testRefBlobADigest = godigest.FromBytes(testRefBlobA).String() - - testRefArtifactTypeA = "application/vnd.nhl.peanut.butter.bagel" - - testRefBlobB = []byte("NBA Strawberry Jam on my NBA croissant") - testRefBlobBLength = 
strconv.Itoa(len(testRefBlobB)) - testRefBlobBDigest = godigest.FromBytes(testRefBlobB).String() - - testRefArtifactTypeB = "application/vnd.nba.strawberry.jam.croissant" - - testAnnotationKey = "org.opencontainers.conformance.test" - testAnnotationValues = map[string]string{} - - // artifact with Subject ref using config.MediaType = artifactType - refsManifestAConfigArtifact := manifest{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.manifest.v1+json", - Config: descriptor{ - MediaType: testRefArtifactTypeA, - Size: int64(len(testRefBlobA)), - Digest: godigest.FromBytes(testRefBlobA), - }, - Subject: &descriptor{ - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(manifests[4].Content)), - Digest: godigest.FromBytes(manifests[4].Content), - }, - Layers: []descriptor{ - emptyJSONDescriptor, - }, - Annotations: map[string]string{ - testAnnotationKey: "test config a", - }, - } - - refsManifestAConfigArtifactContent, err = json.MarshalIndent(&refsManifestAConfigArtifact, "", "\t") - if err != nil { - log.Fatal(err) - } - - refsManifestAConfigArtifactDigest = godigest.FromBytes(refsManifestAConfigArtifactContent).String() - testAnnotationValues[refsManifestAConfigArtifactDigest] = refsManifestAConfigArtifact.Annotations[testAnnotationKey] - - refsManifestBConfigArtifact := manifest{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.manifest.v1+json", - Config: descriptor{ - MediaType: testRefArtifactTypeB, - Size: int64(len(testRefBlobB)), - Digest: godigest.FromBytes(testRefBlobB), - }, - Subject: &descriptor{ - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(manifests[4].Content)), - Digest: godigest.FromBytes(manifests[4].Content), - }, - Layers: []descriptor{ - emptyJSONDescriptor, - }, - Annotations: map[string]string{ - testAnnotationKey: "test config b", - }, - } - - refsManifestBConfigArtifactContent, err = json.MarshalIndent(&refsManifestBConfigArtifact, "", "\t") - if err != nil { - 
log.Fatal(err) - } - - refsManifestBConfigArtifactDigest = godigest.FromBytes(refsManifestBConfigArtifactContent).String() - testAnnotationValues[refsManifestBConfigArtifactDigest] = refsManifestBConfigArtifact.Annotations[testAnnotationKey] - - // artifact with Subject ref using ArtifactType, config.MediaType = emptyJSON - refsManifestALayerArtifact := manifest{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.manifest.v1+json", - ArtifactType: testRefArtifactTypeA, - Config: emptyJSONDescriptor, - Subject: &descriptor{ - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(manifests[4].Content)), - Digest: godigest.FromBytes(manifests[4].Content), - }, - Layers: []descriptor{ - { - MediaType: testRefArtifactTypeA, - Size: int64(len(testRefBlobA)), - Digest: godigest.FromBytes(testRefBlobA), - }, - }, - Annotations: map[string]string{ - testAnnotationKey: "test layer a", - }, - } - - refsManifestALayerArtifactContent, err = json.MarshalIndent(&refsManifestALayerArtifact, "", "\t") - if err != nil { - log.Fatal(err) - } - - refsManifestALayerArtifactDigest = godigest.FromBytes(refsManifestALayerArtifactContent).String() - testAnnotationValues[refsManifestALayerArtifactDigest] = refsManifestALayerArtifact.Annotations[testAnnotationKey] - - refsManifestBLayerArtifact := manifest{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.manifest.v1+json", - ArtifactType: testRefArtifactTypeB, - Config: emptyJSONDescriptor, - Subject: &descriptor{ - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(manifests[4].Content)), - Digest: godigest.FromBytes(manifests[4].Content), - }, - Layers: []descriptor{ - { - MediaType: testRefArtifactTypeB, - Size: int64(len(testRefBlobB)), - Digest: godigest.FromBytes(testRefBlobB), - }, - }, - Annotations: map[string]string{ - testAnnotationKey: "test layer b", - }, - } - - refsManifestBLayerArtifactContent, err = json.MarshalIndent(&refsManifestBLayerArtifact, "", "\t") - 
if err != nil { - log.Fatal(err) - } - - refsManifestBLayerArtifactDigest = godigest.FromBytes(refsManifestBLayerArtifactContent).String() - testAnnotationValues[refsManifestBLayerArtifactDigest] = refsManifestBLayerArtifact.Annotations[testAnnotationKey] - - // ManifestCLayerArtifact is the same as B but based on a subject that has not been pushed - refsManifestCLayerArtifact := manifest{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.manifest.v1+json", - ArtifactType: testRefArtifactTypeB, - Config: emptyJSONDescriptor, - Subject: &descriptor{ - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(manifests[3].Content)), - Digest: godigest.FromBytes(manifests[3].Content), - }, - Layers: []descriptor{ - { - MediaType: testRefArtifactTypeB, - Size: int64(len(testRefBlobB)), - Digest: godigest.FromBytes(testRefBlobB), - }, - }, - } - - refsManifestCLayerArtifactContent, err = json.MarshalIndent(&refsManifestCLayerArtifact, "", "\t") - if err != nil { - log.Fatal(err) - } - - refsManifestCLayerArtifactDigest = godigest.FromBytes(refsManifestCLayerArtifactContent).String() - - testRefArtifactTypeIndex = "application/vnd.food.stand" - refsIndexArtifact := index{ - SchemaVersion: 2, - MediaType: "application/vnd.oci.image.index.v1+json", - ArtifactType: testRefArtifactTypeIndex, - Manifests: []descriptor{ - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(refsManifestAConfigArtifactContent)), - Digest: godigest.FromBytes(refsManifestAConfigArtifactContent), - }, - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(refsManifestALayerArtifactContent)), - Digest: godigest.FromBytes(refsManifestALayerArtifactContent), - }, - }, - Subject: &descriptor{ - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(len(manifests[4].Content)), - Digest: godigest.FromBytes(manifests[4].Content), - }, - Annotations: map[string]string{ - testAnnotationKey: "test index", - }, - } 
- refsIndexArtifactContent, err = json.MarshalIndent(&refsIndexArtifact, "", "\t") - if err != nil { - log.Fatal(err) - } - refsIndexArtifactDigest = godigest.FromBytes(refsIndexArtifactContent).String() - testAnnotationValues[refsIndexArtifactDigest] = refsIndexArtifact.Annotations[testAnnotationKey] - - dummyDigest = godigest.FromString("hello world").String() - - errorCodes = []string{ - BLOB_UNKNOWN: "BLOB_UNKNOWN", - BLOB_UPLOAD_INVALID: "BLOB_UPLOAD_INVALID", - BLOB_UPLOAD_UNKNOWN: "BLOB_UPLOAD_UNKNOWN", - DIGEST_INVALID: "DIGEST_INVALID", - MANIFEST_BLOB_UNKNOWN: "MANIFEST_BLOB_UNKNOWN", - MANIFEST_INVALID: "MANIFEST_INVALID", - MANIFEST_UNKNOWN: "MANIFEST_UNKNOWN", - MANIFEST_UNVERIFIED: "MANIFEST_UNVERIFIED", - NAME_INVALID: "NAME_INVALID", - NAME_UNKNOWN: "NAME_UNKNOWN", - SIZE_INVALID: "SIZE_INVALID", - TAG_INVALID: "TAG_INVALID", - UNAUTHORIZED: "UNAUTHORIZED", - DENIED: "DENIED", - UNSUPPORTED: "UNSUPPORTED", - } - - runPullSetup = true - runPushSetup = true - runContentDiscoverySetup = true - runContentManagementSetup = true - deleteManifestBeforeBlobs = true - - if os.Getenv(envVarTagName) != "" && - os.Getenv(envVarManifestDigest) != "" && - os.Getenv(envVarBlobDigest) != "" { - runPullSetup = false - } - - if os.Getenv(envVarTagList) != "" { - runContentDiscoverySetup = false - } - - if v, ok := os.LookupEnv(envVarDeleteManifestBeforeBlobs); ok { - deleteManifestBeforeBlobs, _ = strconv.ParseBool(v) - } - automaticCrossmountVal := "" - automaticCrossmountVal, runAutomaticCrossmountTest = os.LookupEnv(envVarAutomaticCrossmount) - automaticCrossmountEnabled, _ = strconv.ParseBool(automaticCrossmountVal) - - if dir := os.Getenv(envVarReportDir); dir != "none" { - reportJUnitFilename = filepath.Join(dir, "junit.xml") - reportHTMLFilename = filepath.Join(dir, "report.html") - } - suiteDescription = "OCI Distribution Conformance Tests" -} - -func SkipIfDisabled(test int) { - if userDisabled(test) { - report := generateSkipReport() - g.Skip(report) - } -} 
- -func RunOnlyIf(v bool) { - if !v { - g.Skip("you have skipped this test.") - } -} - -func RunOnlyIfNot(v bool) { - if v { - g.Skip("you have skipped this test.") - } -} - -func Warn(message string) { - // print message - fmt.Fprint(os.Stderr, formatter.Fi(2, "\n{{magenta}}WARNING: %s\n{{/}}", message)) - // print file:line - _, file, line, _ := runtime.Caller(1) - fmt.Fprint(os.Stderr, formatter.Fi(2, "\n%s:%d\n", file, line)) -} - -func generateSkipReport() string { - buf := new(bytes.Buffer) - fmt.Fprintf(buf, "you have skipped this test; if this is an error, check your environment variable settings:\n") - for k := range testMap { - fmt.Fprintf(buf, "\t%s=%s\n", k, os.Getenv(k)) - } - return buf.String() -} - -func userDisabled(test int) bool { - return !(test&testsToRun > 0) -} - -func getTagList(resp *reggie.Response) []string { - jsonData := resp.Body() - tagList := &TagList{} - err := json.Unmarshal(jsonData, tagList) - if err != nil { - return []string{} - } - - return tagList.Tags -} - -// Adapted from https://gist.github.com/dopey/c69559607800d2f2f90b1b1ed4e550fb -func randomString(n int) string { - const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-" - ret := make([]byte, n) - for i := 0; i < n; i++ { - num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) - if err != nil { - panic(err) - } - ret[i] = letters[num.Int64()] - } - return string(ret) -} - -// randomBlob outputs a reproducible random blob (based on the seed) for testing -func randomBlob(size int, seed int64) (godigest.Digest, []byte) { - r := mathrand.New(mathrand.NewSource(seed)) - b := make([]byte, size) - if n, err := r.Read(b); err != nil { - panic(err) - } else if n != size { - panic("unable to read enough bytes") - } - return godigest.FromBytes(b), b -} - -func setupChunkedBlob(size int) { - dig, blob := randomBlob(size, seed+2) - testBlobB = blob - testBlobBDigest = dig.String() - testBlobBChunk1 = testBlobB[:size/2+1] - testBlobBChunk1Length 
= strconv.Itoa(len(testBlobBChunk1)) - testBlobBChunk1Range = fmt.Sprintf("0-%d", len(testBlobBChunk1)-1) - testBlobBChunk2 = testBlobB[size/2+1:] - testBlobBChunk2Length = strconv.Itoa(len(testBlobBChunk2)) - testBlobBChunk2Range = fmt.Sprintf("%d-%d", len(testBlobBChunk1), len(testBlobB)-1) -} diff --git a/conformance/state.go b/conformance/state.go new file mode 100644 index 00000000..4e05940f --- /dev/null +++ b/conformance/state.go @@ -0,0 +1,189 @@ +package main + +import ( + "fmt" + "strings" +) + +type state struct { + APIStatus map[stateAPIType]status + Data map[string]*testData + DataStatus map[string]status +} + +func stateNew() *state { + return &state{ + APIStatus: map[stateAPIType]status{}, + Data: map[string]*testData{}, + DataStatus: map[string]status{}, + } +} + +type stateAPIType int + +const ( + stateAPITagList stateAPIType = iota + stateAPITagDelete + stateAPITagDeleteAtomic + stateAPIBlobCancel + stateAPIBlobPush // any blob push API + stateAPIBlobPostOnly + stateAPIBlobPostPut + stateAPIBlobPatchChunked + stateAPIBlobPatchStream + stateAPIBlobMountSource + stateAPIBlobMountAnonymous + stateAPIBlobGetFull + stateAPIBlobGetRange + stateAPIBlobHead + stateAPIBlobDelete + stateAPIBlobDeleteAtomic + stateAPIManifestPutDigest + stateAPIManifestPutTag + stateAPIManifestPutTagParam + stateAPIManifestPutSubject + stateAPIManifestGetDigest + stateAPIManifestGetTag + stateAPIManifestHeadDigest + stateAPIManifestHeadTag + stateAPIManifestDelete + stateAPIManifestDeleteAtomic + stateAPIReferrers + stateAPIPing + stateAPIMax // number of APIs for iterating +) + +func (a stateAPIType) String() string { + switch a { + default: + return "Unknown" + case stateAPITagList: + return "Tag listing" + case stateAPITagDelete: + return "Tag delete" + case stateAPITagDeleteAtomic: + return "Tag delete atomic" + case stateAPIBlobCancel: + return "Blob upload cancel" + case stateAPIBlobPush: + return "Blob push" + case stateAPIBlobPostOnly: + return "Blob post only" + 
case stateAPIBlobPostPut: + return "Blob post put" + case stateAPIBlobPatchChunked: + return "Blob chunked" + case stateAPIBlobPatchStream: + return "Blob streaming" + case stateAPIBlobMountSource: + return "Blob mount" + case stateAPIBlobMountAnonymous: + return "Blob anonymous mount" + case stateAPIBlobGetFull: + return "Blob get" + case stateAPIBlobGetRange: + return "Blob get range" + case stateAPIBlobHead: + return "Blob head" + case stateAPIBlobDelete: + return "Blob delete" + case stateAPIBlobDeleteAtomic: + return "Blob delete atomic" + case stateAPIManifestPutDigest: + return "Manifest put by digest" + case stateAPIManifestPutTag: + return "Manifest put by tag" + case stateAPIManifestPutTagParam: + return "Manifest put with tag params" + case stateAPIManifestPutSubject: + return "Manifest put with subject" + case stateAPIManifestGetDigest: + return "Manifest get by digest" + case stateAPIManifestGetTag: + return "Manifest get by tag" + case stateAPIManifestHeadDigest: + return "Manifest head by digest" + case stateAPIManifestHeadTag: + return "Manifest head by tag" + case stateAPIManifestDelete: + return "Manifest delete" + case stateAPIManifestDeleteAtomic: + return "Manifest delete atomic" + case stateAPIReferrers: + return "Referrers" + case stateAPIPing: + return "Ping" + } +} + +func (a stateAPIType) MarshalText() ([]byte, error) { + ret := a.String() + if ret == "Unknown" { + return []byte(ret), fmt.Errorf("unknown API %d", a) + } + return []byte(ret), nil +} + +func (a *stateAPIType) UnmarshalText(b []byte) error { + switch strings.ToLower(string(b)) { + default: + return fmt.Errorf("unknown API %s", b) + case "tag listing": + *a = stateAPITagList + case "tag delete": + *a = stateAPITagDelete + case "tag delete atomic": + *a = stateAPITagDeleteAtomic + case "blob upload cancel": + *a = stateAPIBlobCancel + case "blob push": + *a = stateAPIBlobPush + case "blob post only": + *a = stateAPIBlobPostOnly + case "blob post put": + *a = stateAPIBlobPostPut
+ case "blob chunked": + *a = stateAPIBlobPatchChunked + case "blob streaming": + *a = stateAPIBlobPatchStream + case "blob mount": + *a = stateAPIBlobMountSource + case "blob anonymous mount": + *a = stateAPIBlobMountAnonymous + case "blob get": + *a = stateAPIBlobGetFull + case "blob get range": + *a = stateAPIBlobGetRange + case "blob head": + *a = stateAPIBlobHead + case "blob delete": + *a = stateAPIBlobDelete + case "blob delete atomic": + *a = stateAPIBlobDeleteAtomic + case "manifest put by digest": + *a = stateAPIManifestPutDigest + case "manifest put by tag": + *a = stateAPIManifestPutTag + case "manifest put with tag params": + *a = stateAPIManifestPutTagParam + case "manifest put with subject": + *a = stateAPIManifestPutSubject + case "manifest get by digest": + *a = stateAPIManifestGetDigest + case "manifest get by tag": + *a = stateAPIManifestGetTag + case "manifest head by digest": + *a = stateAPIManifestHeadDigest + case "manifest head by tag": + *a = stateAPIManifestHeadTag + case "manifest delete": + *a = stateAPIManifestDelete + case "manifest delete atomic": + *a = stateAPIManifestDeleteAtomic + case "referrers": + *a = stateAPIReferrers + case "ping": + *a = stateAPIPing + } + return nil +} diff --git a/conformance/testdata.go b/conformance/testdata.go new file mode 100644 index 00000000..2a99487c --- /dev/null +++ b/conformance/testdata.go @@ -0,0 +1,617 @@ +package main + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "maps" + "math" + "math/big" + "reflect" + "strings" + + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + image "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + mtExampleConf1 = "application/vnd.example.oci.conformance.v1" + mtExampleConf2 = "application/vnd.example.oci.conformance.v2" + mtOctetStream = "application/octet-stream" + mtOCIConfig = "application/vnd.oci.image.config.v1+json" + mtOCIImage =
"application/vnd.oci.image.manifest.v1+json" + mtOCIIndex = "application/vnd.oci.image.index.v1+json" + mtOCILayer = "application/vnd.oci.image.layer.v1.tar" + mtOCILayerPre = "application/vnd.oci.image.layer.v1." + mtOCILayerGz = "application/vnd.oci.image.layer.v1.tar+gzip" + mtOCILayerNd = "application/vnd.oci.image.layer.nondistributable.v1.tar" + mtOCILayerNdGz = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + mtOCIEmptyJSON = "application/vnd.oci.empty.v1+json" +) + +type testData struct { + name string // name of data set for logs + tags map[string]digest.Digest + tagPushed map[string]bool + desc map[digest.Digest]*image.Descriptor + blobs map[digest.Digest][]byte + manifests map[digest.Digest][]byte + manOrder []digest.Digest // ordered list to push manifests, the last is optionally tagged + referrers map[digest.Digest][]*image.Descriptor + pullOpts map[digest.Digest][]apiDoOpt + pushOpts map[digest.Digest][]apiDoOpt +} + +func newTestData(name string) *testData { + return &testData{ + name: name, + tags: map[string]digest.Digest{}, + tagPushed: map[string]bool{}, + desc: map[digest.Digest]*image.Descriptor{}, + blobs: map[digest.Digest][]byte{}, + manifests: map[digest.Digest][]byte{}, + manOrder: []digest.Digest{}, + referrers: map[digest.Digest][]*image.Descriptor{}, + pullOpts: map[digest.Digest][]apiDoOpt{}, + pushOpts: map[digest.Digest][]apiDoOpt{}, + } +} + +type genComp int + +const ( + genCompUncomp genComp = iota + genCompGzip +) + +type genOptS struct { + algo digest.Algorithm + annotations map[string]string + annotationUniq bool + artifactType string + blobSize int64 + comp genComp + configBytes []byte + configMediaType string + descriptorMediaType string + extraField bool + layerBytes []byte + layerCount int + layerMediaType string + platform image.Platform + platforms []*image.Platform + setData bool + subject *image.Descriptor + tag string +} + +type genOpt func(*genOptS) + +func genWithAlgo(algo digest.Algorithm) genOpt { + 
return func(opt *genOptS) { + opt.algo = algo + } +} + +func genWithAnnotations(annotations map[string]string) genOpt { + return func(opt *genOptS) { + if opt.annotations == nil { + opt.annotations = annotations + } else { + maps.Copy(opt.annotations, annotations) + } + } +} + +func genWithAnnotationUniq() genOpt { + return func(opt *genOptS) { + opt.annotationUniq = true + } +} + +func genWithArtifactType(artifactType string) genOpt { + return func(opt *genOptS) { + opt.artifactType = artifactType + } +} + +func genWithCompress(comp genComp) genOpt { + return func(opt *genOptS) { + opt.comp = comp + } +} + +func genWithConfigBytes(b []byte) genOpt { + return func(opt *genOptS) { + opt.configBytes = b + } +} + +func genWithConfigMediaType(mediaType string) genOpt { + return func(opt *genOptS) { + opt.configMediaType = mediaType + } +} + +func genWithDescriptorData() genOpt { + return func(opt *genOptS) { + opt.setData = true + } +} + +func genWithDescriptorMediaType(mediaType string) genOpt { + return func(opt *genOptS) { + opt.descriptorMediaType = mediaType + } +} + +func genWithExtraField() genOpt { + return func(opt *genOptS) { + opt.extraField = true + } +} + +func genWithLayerBytes(b []byte) genOpt { + return func(opt *genOptS) { + opt.layerBytes = b + } +} + +func genWithLayerCount(count int) genOpt { + return func(opt *genOptS) { + opt.layerCount = count + } +} + +func genWithLayerMediaType(mediaType string) genOpt { + return func(opt *genOptS) { + opt.layerMediaType = mediaType + } +} + +func genWithPlatform(p image.Platform) genOpt { + return func(opt *genOptS) { + opt.platform = p + } +} + +func genWithPlatforms(platforms []*image.Platform) genOpt { + return func(opt *genOptS) { + opt.platforms = platforms + } +} + +func genWithBlobSize(size int64) genOpt { + return func(opt *genOptS) { + opt.blobSize = size + } +} + +func genWithSubject(subject image.Descriptor) genOpt { + return func(opt *genOptS) { + opt.subject = &subject + } +} + +func 
genWithTag(tag string) genOpt { + return func(opt *genOptS) { + opt.tag = tag + } +} + +func (td *testData) addBlob(b []byte, opts ...genOpt) (digest.Digest, error) { + gOpt := genOptS{ + algo: digest.Canonical, + descriptorMediaType: mtOctetStream, + } + for _, opt := range opts { + opt(&gOpt) + } + dig := gOpt.algo.FromBytes(b) + td.blobs[dig] = b + td.desc[dig] = &image.Descriptor{ + MediaType: gOpt.descriptorMediaType, + Digest: dig, + Size: int64(len(b)), + } + if gOpt.setData { + td.desc[dig].Data = b + } + return dig, nil +} + +func (td *testData) genBlob(opts ...genOpt) (digest.Digest, []byte, error) { + gOpt := genOptS{ + blobSize: 2048, + } + for _, opt := range opts { + opt(&gOpt) + } + b := make([]byte, gOpt.blobSize) + _, err := rand.Read(b) + if err != nil { + return digest.Digest(""), nil, err + } + dig, err := td.addBlob(b, opts...) + return dig, b, err +} + +// genLayer returns a new layer containing a tar file returning: +// - compressed digest +// - uncompressed digest +// - layer body (tar+compression) +func (td *testData) genLayer(fileNum int, opts ...genOpt) (digest.Digest, digest.Digest, []byte, error) { + gOpt := genOptS{ + comp: genCompGzip, + algo: digest.Canonical, + } + for _, opt := range opts { + opt(&gOpt) + } + bufUncomp := &bytes.Buffer{} + bufComp := &bytes.Buffer{} + var wUncomp io.Writer + var mt string + switch gOpt.comp { + case genCompGzip: + wUncomp = gzip.NewWriter(bufComp) + mt = mtOCILayerGz + case genCompUncomp: + wUncomp = bufComp + mt = mtOCILayer + } + wTar := tar.NewWriter(io.MultiWriter(bufUncomp, wUncomp)) + bigRandNum, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + return digest.Digest(""), digest.Digest(""), nil, err + } + randNum := bigRandNum.Int64() + file := fmt.Sprintf("Conformance test file contents for file number %d.\nTodays lucky number is %d\n", fileNum, randNum) + err = wTar.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("./conformance-%d.txt", fileNum), + Size: int64(len(file)), + Mode: 0o644, + }) + 
if err != nil { + return digest.Digest(""), digest.Digest(""), nil, err + } + _, err = wTar.Write([]byte(file)) + if err != nil { + return digest.Digest(""), digest.Digest(""), nil, err + } + err = wTar.Close() + if err != nil { + return digest.Digest(""), digest.Digest(""), nil, err + } + if closer, ok := wUncomp.(io.Closer); gOpt.comp != genCompUncomp && ok { + err = closer.Close() + } + if err != nil { + return digest.Digest(""), digest.Digest(""), nil, err + } + bodyComp := bufComp.Bytes() + bodyUncomp := bufUncomp.Bytes() + digComp := gOpt.algo.FromBytes(bodyComp) + digUncomp := gOpt.algo.FromBytes(bodyUncomp) + td.blobs[digComp] = bodyComp + td.desc[digComp] = &image.Descriptor{ + MediaType: mt, + Digest: digComp, + Size: int64(len(bodyComp)), + } + if gOpt.setData { + td.desc[digComp].Data = bodyComp + } + td.desc[digUncomp] = &image.Descriptor{ + MediaType: mtOCILayer, + Digest: digUncomp, + Size: int64(len(bodyUncomp)), + } + if gOpt.setData { + td.desc[digUncomp].Data = bodyUncomp + } + return digComp, digUncomp, bodyComp, nil +} + +// genConfig returns a config for the given platform and list of uncompressed layer digests. 
+func (td *testData) genConfig(p image.Platform, layers []digest.Digest, opts ...genOpt) (digest.Digest, []byte, error) { + gOpt := genOptS{ + algo: digest.Canonical, + configMediaType: mtOCIConfig, + } + for _, opt := range opts { + opt(&gOpt) + } + config := image.Image{ + Author: "OCI Conformance Test", + Platform: p, + RootFS: image.RootFS{ + Type: "layers", + DiffIDs: layers, + }, + } + var body []byte + var err error + if !gOpt.extraField { + body, err = json.Marshal(config) + } else { + body, err = json.Marshal(genAddJSONFields(config)) + } + if err != nil { + return digest.Digest(""), nil, err + } + dig := gOpt.algo.FromBytes(body) + td.blobs[dig] = body + td.desc[dig] = &image.Descriptor{ + MediaType: gOpt.configMediaType, + Digest: dig, + Size: int64(len(body)), + } + if gOpt.setData { + td.desc[dig].Data = body + } + return dig, body, nil +} + +// genManifest returns an image manifest with the selected config and compressed layer digests. +func (td *testData) genManifest(conf image.Descriptor, layers []image.Descriptor, opts ...genOpt) (digest.Digest, []byte, error) { + gOpt := genOptS{ + algo: digest.Canonical, + } + for _, opt := range opts { + opt(&gOpt) + } + mt := mtOCIImage + m := image.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: mt, + ArtifactType: gOpt.artifactType, + Config: conf, + Layers: layers, + Subject: gOpt.subject, + Annotations: gOpt.annotations, + } + if gOpt.annotationUniq { + if m.Annotations == nil { + m.Annotations = map[string]string{} + } else { + m.Annotations = maps.Clone(m.Annotations) + } + m.Annotations["org.example."+rand.Text()] = rand.Text() + } + var body []byte + var err error + if !gOpt.extraField { + body, err = json.Marshal(m) + } else { + body, err = json.Marshal(genAddJSONFields(m)) + } + if err != nil { + return digest.Digest(""), nil, err + } + dig := gOpt.algo.FromBytes(body) + td.manifests[dig] = body + td.manOrder = append(td.manOrder, dig) + td.desc[dig] = &image.Descriptor{ + 
MediaType: m.MediaType, + Digest: dig, + Size: int64(len(body)), + } + if gOpt.setData { + td.desc[dig].Data = body + } + at := m.ArtifactType + if at == "" { + at = m.Config.MediaType + } + if gOpt.subject != nil { + td.referrers[gOpt.subject.Digest] = append(td.referrers[gOpt.subject.Digest], &image.Descriptor{ + MediaType: m.MediaType, + ArtifactType: at, + Digest: dig, + Size: int64(len(body)), + Annotations: m.Annotations, + }) + } + if gOpt.tag != "" { + td.tags[gOpt.tag] = dig + } + return dig, body, nil +} + +// genManifestFull creates an image with layers and a config +func (td *testData) genManifestFull(opts ...genOpt) (digest.Digest, error) { + gOpt := genOptS{ + layerCount: 2, + platform: image.Platform{OS: "linux", Architecture: "amd64"}, + } + for _, opt := range opts { + opt(&gOpt) + } + digCList := []digest.Digest{} + digUCList := []digest.Digest{} + for l := range gOpt.layerCount { + if gOpt.layerMediaType == "" || strings.HasPrefix(gOpt.layerMediaType, mtOCILayerPre) { + // image + digC, digUC, _, err := td.genLayer(l, opts...) + if err != nil { + return "", fmt.Errorf("failed to generate test data layer %d: %w", l, err) + } + digCList = append(digCList, digC) + digUCList = append(digUCList, digUC) + } else { + // artifact + lOpts := []genOpt{ + genWithDescriptorMediaType(gOpt.layerMediaType), + } + lOpts = append(lOpts, opts...) + if gOpt.layerBytes != nil { + dig, err := td.addBlob(gOpt.layerBytes, lOpts...) + if err != nil { + return "", fmt.Errorf("failed to generate test artifact layer: %w", err) + } + digCList = append(digCList, dig) + digUCList = append(digUCList, dig) + } else { + dig, _, err := td.genBlob(lOpts...) 
+ if err != nil { + return "", fmt.Errorf("failed to generate test artifact blob: %w", err) + } + digCList = append(digCList, dig) + digUCList = append(digUCList, dig) + } + } + } + cDig := digest.Digest("") + if gOpt.configMediaType == "" || gOpt.configMediaType == mtOCIConfig { + // image config + dig, _, err := td.genConfig(gOpt.platform, digUCList, opts...) + if err != nil { + return "", fmt.Errorf("failed to generate test data: %w", err) + } + cDig = dig + } else { + // artifact + bOpts := []genOpt{ + genWithDescriptorMediaType(gOpt.configMediaType), + } + bOpts = append(bOpts, opts...) + dig, err := td.addBlob(gOpt.configBytes, bOpts...) + if err != nil { + return "", fmt.Errorf("failed to generate test artifact config: %w", err) + } + cDig = dig + } + layers := make([]image.Descriptor, len(digCList)) + for i, lDig := range digCList { + layers[i] = *td.desc[lDig] + } + mDig, _, err := td.genManifest(*td.desc[cDig], layers, opts...) + if err != nil { + return "", fmt.Errorf("failed to generate test data: %w", err) + } + return mDig, nil +} + +// genIndex returns an index manifest with the specified layers and platforms. 
+func (td *testData) genIndex(platforms []*image.Platform, manifests []digest.Digest, opts ...genOpt) (digest.Digest, []byte, error) { + mt := mtOCIIndex + gOpt := genOptS{ + algo: digest.Canonical, + } + for _, opt := range opts { + opt(&gOpt) + } + if len(platforms) != len(manifests) { + return digest.Digest(""), nil, fmt.Errorf("genIndex requires the same number of platforms and layers") + } + ind := image.Index{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: mt, + ArtifactType: gOpt.artifactType, + Manifests: make([]image.Descriptor, len(manifests)), + Subject: gOpt.subject, + Annotations: gOpt.annotations, + } + for i, l := range manifests { + d := *td.desc[l] + d.Platform = platforms[i] + ind.Manifests[i] = d + } + if gOpt.annotationUniq { + if ind.Annotations == nil { + ind.Annotations = map[string]string{} + } else { + ind.Annotations = maps.Clone(ind.Annotations) + } + ind.Annotations["org.example."+rand.Text()] = rand.Text() + } + var body []byte + var err error + if !gOpt.extraField { + body, err = json.Marshal(ind) + } else { + body, err = json.Marshal(genAddJSONFields(ind)) + } + if err != nil { + return digest.Digest(""), nil, err + } + dig := gOpt.algo.FromBytes(body) + td.manifests[dig] = body + td.manOrder = append(td.manOrder, dig) + td.desc[dig] = &image.Descriptor{ + MediaType: ind.MediaType, + Digest: dig, + Size: int64(len(body)), + } + if gOpt.setData { + td.desc[dig].Data = body + } + if gOpt.subject != nil { + td.referrers[gOpt.subject.Digest] = append(td.referrers[gOpt.subject.Digest], &image.Descriptor{ + MediaType: ind.MediaType, + ArtifactType: ind.ArtifactType, + Digest: dig, + Size: int64(len(body)), + Annotations: ind.Annotations, + }) + } + if gOpt.tag != "" { + td.tags[gOpt.tag] = dig + } + return dig, body, nil +} + +// genIndexFull creates an index with multiple images, including the image layers and configs +func (td *testData) genIndexFull(opts ...genOpt) (digest.Digest, error) { + gOpt := genOptS{ + platforms: 
[]*image.Platform{ + {OS: "linux", Architecture: "amd64"}, + {OS: "linux", Architecture: "arm64"}, + }, + } + for _, opt := range opts { + opt(&gOpt) + } + digImgList := []digest.Digest{} + for _, p := range gOpt.platforms { + iOpts := []genOpt{ + genWithPlatform(*p), + } + iOpts = append(iOpts, opts...) + mDig, err := td.genManifestFull(iOpts...) + if err != nil { + return "", err + } + digImgList = append(digImgList, mDig) + } + iDig, _, err := td.genIndex(gOpt.platforms, digImgList, opts...) + if err != nil { + return "", fmt.Errorf("failed to generate test data: %w", err) + } + return iDig, nil +} + +func genAddJSONFields(v any) any { + newT := reflect.StructOf([]reflect.StructField{ + { + Name: "Embed", + Anonymous: true, + Type: reflect.TypeOf(v), + }, + { + Name: "Custom", + Type: reflect.TypeFor[string](), + Tag: reflect.StructTag("json:\"org." + rand.Text() + "\""), + }, + }) + newV := reflect.New(newT).Elem() + newV.Field(0).Set(reflect.ValueOf(v)) + newV.FieldByName("Custom").SetString(rand.Text()) + return newV.Interface() +}