Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 26 additions & 2 deletions doc/plugin_agent_workloadattestor_docker.md
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
# Agent plugin: WorkloadAttestor "docker"

The `docker` plugin generates selectors based on docker labels for workloads calling the agent.
The `docker` plugin generates selectors based on container labels for workloads calling the agent.
It does so by retrieving the workload's container ID from its cgroup membership on Unix systems or Job Object names on Windows,
then querying the docker daemon for the container's labels.
then querying the container runtime API (Docker by default, or Podman when detected) for the container's labels.

| Configuration | Description | Default |
|--------------------------------|------------------------------------------------------------------------------------------------|----------------------------------|
| docker_socket_path | The location of the docker daemon socket (Unix) | "unix:///var/run/docker.sock" |
| podman_socket_path | The location of the rootful Podman socket (Unix) | "unix:///run/podman/podman.sock" |
| podman_socket_path_template | The socket template for rootless Podman (Unix). Must contain one `%d` UID placeholder | "unix:///run/user/%d/podman/podman.sock" |
| docker_version                 | The API version of the docker daemon. If not specified, the version is negotiated with the daemon | |
| container_id_cgroup_matchers | A list of patterns used to discover container IDs from cgroup entries (Unix) | |
| docker_host | The location of the Docker Engine API endpoint (Windows only) | "npipe:////./pipe/docker_engine" |
Expand All @@ -22,6 +24,28 @@ A sample configuration:
}
```

## Podman support (Unix)

The plugin supports Podman workloads, including rootless Podman in multi-user hosts.

At attestation time, the plugin inspects the workload cgroup path:

- If a Podman cgroup path is detected and includes a user slice (`/user-<uid>.slice/`), SPIRE treats the workload as rootless Podman and calls the Podman API using `podman_socket_path_template` with `<uid>` substituted into `%d`.
- If a Podman cgroup path is detected but no user slice UID is present, SPIRE uses `podman_socket_path` (rootful Podman).
- If no Podman cgroup path is detected, SPIRE uses `docker_socket_path` (Docker).

This per-workload socket selection avoids routing rootless Podman workloads through a single global daemon socket.

Example rootless customization:

```hcl
WorkloadAttestor "docker" {
plugin_data {
podman_socket_path_template = "unix:///custom/user/%d/podman.sock"
}
}
```

## Sigstore experimental feature

This feature extends the `docker` workload attestor with the ability to validate container image signatures and attestations using the [Sigstore](https://www.sigstore.dev/) ecosystem.
Expand Down
44 changes: 36 additions & 8 deletions pkg/agent/plugin/workloadattestor/docker/docker.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,25 +48,38 @@ type Docker interface {
ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error)
}

type closeableDocker interface {
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it would be cleaner to make the close obligation explicit at the type level, for example changing podmanClientFactory to return (Docker, io.Closer, error), rather than relying on a runtime type assertion in Attest.
I believe that would make the contract more discoverable. Looks like right now, to learn that the returned client should be closeable, you have to trace through Attest and find the type assertion.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Makes sense, thanks! Replaced the runtime type assertion with a podmanDocker interface embedding Docker and Close() error, so podmanClientFactory now returns (podmanDocker, error) directly.

Close() error
}

type Plugin struct {
workloadattestorv1.UnsafeWorkloadAttestorServer
configv1.UnsafeConfigServer

log hclog.Logger
retryer *retryer

mtx sync.RWMutex
docker Docker
c *containerHelper
sigstoreVerifier sigstore.Verifier
mtx sync.RWMutex
docker Docker
c *containerHelper
sigstoreVerifier sigstore.Verifier
podmanClientFactory func(socketPath string) (Docker, error)
}

func New() *Plugin {
return &Plugin{
retryer: newRetryer(),
retryer: newRetryer(),
podmanClientFactory: defaultPodmanClientFactory,
}
}

// defaultPodmanClientFactory builds a Docker-API-compatible client bound to
// the given Podman socket. API version negotiation is enabled so the client
// adapts to whatever API version the Podman service exposes.
func defaultPodmanClientFactory(socketPath string) (Docker, error) {
	opts := []dockerclient.Opt{
		dockerclient.WithHost(socketPath),
		dockerclient.WithAPIVersionNegotiation(),
	}
	return dockerclient.NewClientWithOpts(opts...)
}

type dockerPluginConfig struct {
OSConfig `hcl:",squash"`

Expand Down Expand Up @@ -132,7 +145,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
p.mtx.RLock()
defer p.mtx.RUnlock()

containerID, err := p.c.getContainerID(req.Pid, p.log)
containerID, podmanSocket, err := p.c.getContainerIDAndSocket(req.Pid, p.log)
switch {
case err != nil:
return nil, err
Expand All @@ -141,9 +154,24 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
return &workloadattestorv1.AttestResponse{}, nil
}

client := p.docker
if podmanSocket != "" {
client, err = p.podmanClientFactory(podmanSocket)
if err != nil {
return nil, fmt.Errorf("unable to create Podman client for socket %q: %w", podmanSocket, err)
}
if closeableClient, ok := client.(closeableDocker); ok {
defer func() {
if closeErr := closeableClient.Close(); closeErr != nil {
p.log.Warn("Failed to close Podman client", telemetry.Error, closeErr)
}
}()
}
}

var container container.InspectResponse
err = p.retryer.Retry(ctx, func() error {
container, err = p.docker.ContainerInspect(ctx, containerID)
container, err = client.ContainerInspect(ctx, containerID)
return err
})
if err != nil {
Expand All @@ -156,7 +184,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
var inspectErr error
imageName := container.Config.Image
if imageName != "" || p.sigstoreVerifier != nil {
imageJSON, _, inspectErr = p.docker.ImageInspectWithRaw(ctx, imageName)
imageJSON, _, inspectErr = client.ImageInspectWithRaw(ctx, imageName)
}

// Add image_config_digest selector
Expand Down
101 changes: 97 additions & 4 deletions pkg/agent/plugin/workloadattestor/docker/docker_posix.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ import (
"io"
"os"
"path/filepath"
"regexp"
"strconv"

"github.com/hashicorp/go-hclog"
"github.com/spiffe/spire/pkg/agent/common/cgroups"
Expand All @@ -16,6 +18,16 @@ import (
"github.com/spiffe/spire/pkg/common/pluginconf"
)

const (
	// Default socket for rootful Podman (system-wide service).
	defaultPodmanSocketPath = "unix:///run/podman/podman.sock"
	// Default socket template for rootless Podman; %d is replaced with the
	// workload owner's host UID extracted from the cgroup path.
	defaultPodmanSocketPathTemplate = "unix:///run/user/%d/podman/podman.sock"
)

var (
	// rePodmanCgroup matches cgroup paths created by Podman/libpod
	// (e.g. "libpod-<id>.scope" or ".../libpod/<id>").
	rePodmanCgroup = regexp.MustCompile(`(?:libpod-|/libpod/)`)
	// reUserSliceUID captures the UID from a systemd user slice segment
	// (e.g. "/user-1000.slice/"), indicating a rootless workload.
	reUserSliceUID = regexp.MustCompile(`/user-(\d+)\.slice/`)
)

type OSConfig struct {
// DockerSocketPath is the location of the docker daemon socket, this config can be used only on unix environments (default: "unix:///var/run/docker.sock").
DockerSocketPath string `hcl:"docker_socket_path" json:"docker_socket_path"`
Expand All @@ -33,6 +45,15 @@ type OSConfig struct {
// about mountinfo and cgroup information used to locate the container.
VerboseContainerLocatorLogs bool `hcl:"verbose_container_locator_logs"`

// PodmanSocketPath is the socket path for rootful Podman (no user namespace).
// Defaults to "unix:///run/podman/podman.sock".
PodmanSocketPath string `hcl:"podman_socket_path" json:"podman_socket_path"`

// PodmanSocketPathTemplate is the socket path template for rootless Podman.
// The placeholder %d is replaced with the container owner's host UID extracted
// from the cgroup path. Defaults to "unix:///run/user/%d/podman/podman.sock".
PodmanSocketPathTemplate string `hcl:"podman_socket_path_template" json:"podman_socket_path_template"`

// Used by tests to use a fake /proc directory instead of the real one
rootDir string
}
Expand Down Expand Up @@ -63,10 +84,25 @@ func (p *Plugin) createHelper(c *dockerPluginConfig, status *pluginconf.Status)
rootDir = "/"
}

podmanSocketPath := c.PodmanSocketPath
if podmanSocketPath == "" {
podmanSocketPath = defaultPodmanSocketPath
}
podmanSocketPathTemplate := c.PodmanSocketPathTemplate
if podmanSocketPathTemplate == "" {
podmanSocketPathTemplate = defaultPodmanSocketPathTemplate
}
if err := validatePodmanSocketPathTemplate(podmanSocketPathTemplate); err != nil {
status.ReportErrorf("invalid podman_socket_path_template: %v", err)
return nil
}

return &containerHelper{
rootDir: rootDir,
containerIDFinder: containerIDFinder,
verboseContainerLocatorLogs: c.VerboseContainerLocatorLogs,
podmanSocketPath: podmanSocketPath,
podmanSocketPathTemplate: podmanSocketPathTemplate,
}
}

Expand All @@ -80,19 +116,76 @@ type containerHelper struct {
rootDir string
containerIDFinder cgroup.ContainerIDFinder
verboseContainerLocatorLogs bool
podmanSocketPath string
podmanSocketPathTemplate string
}

func (h *containerHelper) getContainerID(pID int32, log hclog.Logger) (string, error) {
func (h *containerHelper) getContainerIDAndSocket(pID int32, log hclog.Logger) (string, string, error) {
if h.containerIDFinder != nil {
cgroupList, err := cgroups.GetCgroups(pID, dirFS(h.rootDir))
if err != nil {
return "", err
return "", "", err
}
return getContainerIDFromCGroups(h.containerIDFinder, cgroupList)
containerID, err := getContainerIDFromCGroups(h.containerIDFinder, cgroupList)
if err != nil || containerID == "" {
return "", "", err
}
return containerID, h.detectPodmanSocket(cgroupList), nil
}

extractor := containerinfo.Extractor{RootDir: h.rootDir, VerboseLogging: h.verboseContainerLocatorLogs}
return extractor.GetContainerID(pID, log)
containerID, err := extractor.GetContainerID(pID, log)
if err != nil || containerID == "" {
return "", "", err
}

cgroupList, err := cgroups.GetCgroups(pID, dirFS(h.rootDir))
if err != nil {
log.Warn("Failed to read cgroups for Podman detection, falling back to Docker client", "pid", pID, "err", err)
return containerID, "", nil
}
return containerID, h.detectPodmanSocket(cgroupList), nil
}

func (h *containerHelper) detectPodmanSocket(cgroupList []cgroups.Cgroup) string {
for _, cg := range cgroupList {
if !rePodmanCgroup.MatchString(cg.GroupPath) {
continue
}
if m := reUserSliceUID.FindStringSubmatch(cg.GroupPath); m != nil {
if uid, err := strconv.ParseUint(m[1], 10, 32); err == nil {
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm wondering if we should log a warning when ParseUint fails here. The regex matched a user slice, so the workload is rootless, but the code would silently fall through to the rootful Podman socket.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point, thanks! Added a Warn log with the raw UID and cgroup path before falling back to the rootful socket.

return fmt.Sprintf(h.podmanSocketPathTemplate, uid)
}
}
return h.podmanSocketPath
}
return ""
}

// validatePodmanSocketPathTemplate checks that template is safe to use with
// fmt.Sprintf for UID substitution: every '%' must start either an escaped
// "%%" or the "%d" UID placeholder, and exactly one "%d" must be present.
func validatePodmanSocketPathTemplate(template string) error {
	uidPlaceholders := 0
	for i := 0; i < len(template); {
		if template[i] != '%' {
			i++
			continue
		}
		if i == len(template)-1 {
			return errors.New("trailing % at end of template")
		}
		switch template[i+1] {
		case '%':
			// escaped literal percent sign
		case 'd':
			uidPlaceholders++
		default:
			return errors.New("template only supports escaped %% or the %d UID placeholder")
		}
		i += 2
	}

	if uidPlaceholders != 1 {
		return errors.New("template must contain exactly one %d UID placeholder")
	}
	return nil
}

func getDockerHost(c *dockerPluginConfig) string {
Expand Down
Loading