diff --git a/cmd/buf/internal/command/alpha/protoc/protoc.go b/cmd/buf/internal/command/alpha/protoc/protoc.go index 8c5daea9bb..27321487ed 100644 --- a/cmd/buf/internal/command/alpha/protoc/protoc.go +++ b/cmd/buf/internal/command/alpha/protoc/protoc.go @@ -219,7 +219,7 @@ func run( return err } } - if err := responseWriter.Close(); err != nil { + if err := responseWriter.Close(ctx); err != nil { return err } return nil diff --git a/cmd/buf/internal/command/generate/generate_test.go b/cmd/buf/internal/command/generate/generate_test.go index 5a8cc15538..4a88e931c9 100644 --- a/cmd/buf/internal/command/generate/generate_test.go +++ b/cmd/buf/internal/command/generate/generate_test.go @@ -372,7 +372,7 @@ func TestOutputFlag(t *testing.T) { } } -func TestSkipWriteWhenUnchanged(t *testing.T) { +func TestSmartCleanPreservesMtime(t *testing.T) { t.Parallel() tempDirPath := t.TempDir() template := filepath.Join("testdata", "simple", "buf.gen.yaml") diff --git a/private/buf/bufgen/generator.go b/private/buf/bufgen/generator.go index 769d452010..283f85da5f 100644 --- a/private/buf/bufgen/generator.go +++ b/private/buf/bufgen/generator.go @@ -104,15 +104,17 @@ func (g *generator) Generate( if generateOptions.deleteOuts != nil { shouldDeleteOuts = *generateOptions.deleteOuts } + responseWriterOptions := []bufprotopluginos.ResponseWriterOption{ + bufprotopluginos.ResponseWriterWithCreateOutDirIfNotExists(), + } if shouldDeleteOuts { - if err := g.deleteOuts( - ctx, - generateOptions.baseOutDirPath, - config.GeneratePluginConfigs(), - ); err != nil { - return err - } + responseWriterOptions = append(responseWriterOptions, bufprotopluginos.ResponseWriterWithDeleteOuts()) } + responseWriter := bufprotopluginos.NewResponseWriter( + g.logger, + g.storageosProvider, + responseWriterOptions..., + ) for _, image := range images { if err := g.generateCode( ctx, @@ -120,33 +122,14 @@ func (g *generator) Generate( image, generateOptions.baseOutDirPath, config.GeneratePluginConfigs(), + 
responseWriter, generateOptions.includeImportsOverride, generateOptions.includeWellKnownTypesOverride, ); err != nil { return err } } - return nil -} - -func (g *generator) deleteOuts( - ctx context.Context, - baseOutDir string, - pluginConfigs []bufconfig.GeneratePluginConfig, -) error { - return bufprotopluginos.NewCleaner(g.storageosProvider).DeleteOuts( - ctx, - xslices.Map( - pluginConfigs, - func(pluginConfig bufconfig.GeneratePluginConfig) string { - out := pluginConfig.Out() - if baseOutDir != "" && baseOutDir != "." { - return filepath.Join(baseOutDir, out) - } - return out - }, - ), - ) + return responseWriter.Close(ctx) } func (g *generator) generateCode( @@ -155,6 +138,7 @@ func (g *generator) generateCode( inputImage bufimage.Image, baseOutDir string, pluginConfigs []bufconfig.GeneratePluginConfig, + responseWriter bufprotopluginos.ResponseWriter, includeImportsOverride *bool, includeWellKnownTypesOverride *bool, ) error { @@ -170,11 +154,6 @@ func (g *generator) generateCode( return err } // Apply the CodeGeneratorResponses in the order they were specified. - responseWriter := bufprotopluginos.NewResponseWriter( - g.logger, - g.storageosProvider, - bufprotopluginos.ResponseWriterWithCreateOutDirIfNotExists(), - ) for i, pluginConfig := range pluginConfigs { out := pluginConfig.Out() if baseOutDir != "" && baseOutDir != "." 
{ @@ -192,9 +171,6 @@ func (g *generator) generateCode( return fmt.Errorf("plugin %s: %v", pluginConfig.Name(), err) } } - if err := responseWriter.Close(); err != nil { - return err - } return nil } diff --git a/private/bufpkg/bufprotoplugin/bufprotopluginos/bufprotopluginos.go b/private/bufpkg/bufprotoplugin/bufprotopluginos/bufprotopluginos.go index 4d4092caef..362ec10180 100644 --- a/private/bufpkg/bufprotoplugin/bufprotopluginos/bufprotopluginos.go +++ b/private/bufpkg/bufprotoplugin/bufprotopluginos/bufprotopluginos.go @@ -17,7 +17,6 @@ package bufprotopluginos import ( "context" - "io" "log/slog" "github.com/bufbuild/buf/private/pkg/storage/storageos" @@ -26,9 +25,11 @@ import ( // ResponseWriter writes CodeGeneratorResponses to the OS filesystem. type ResponseWriter interface { - // Close writes all of the responses to disk. No further calls can be - // made to the ResponseWriter after this call. - io.Closer + // Close writes all of the responses to disk and, when + // ResponseWriterWithDeleteOuts is enabled, removes stale files from + // output directories. No further calls can be made to the + // ResponseWriter after this call. + Close(ctx context.Context) error // AddResponse adds the response to the writer, switching on the file extension. // If there is a .jar extension, this generates a jar. If there is a .zip @@ -67,15 +68,14 @@ func ResponseWriterWithCreateOutDirIfNotExists() ResponseWriterOption { } } -// Cleaner deletes output locations prior to generation. -// -// This must be done before any interaction with ResponseWriters, as multiple plugins may output to a single -// location. -type Cleaner interface { - DeleteOuts(ctx context.Context, pluginOuts []string) error -} - -// NewCleaner returns a new Cleaner. -func NewCleaner(storageosProvider storageos.Provider) Cleaner { - return newCleaner(storageosProvider) +// ResponseWriterWithDeleteOuts returns a ResponseWriterOption that deletes files +// on Close that were not written during generation. 
For directory outputs, any +// file on disk that was not part of the generated output is deleted after all +// new content is written, and empty directories are removed. For zip/jar outputs, +// the file is only rewritten when the generated content differs from what is +// already on disk. +func ResponseWriterWithDeleteOuts() ResponseWriterOption { + return func(responseWriterOptions *responseWriterOptions) { + responseWriterOptions.deleteOuts = true + } } diff --git a/private/bufpkg/bufprotoplugin/bufprotopluginos/cleaner.go b/private/bufpkg/bufprotoplugin/bufprotopluginos/cleaner.go deleted file mode 100644 index dda9b09bd0..0000000000 --- a/private/bufpkg/bufprotoplugin/bufprotopluginos/cleaner.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2020-2026 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package bufprotopluginos - -import ( - "context" - "errors" - "io/fs" - "path/filepath" - - "buf.build/go/standard/xpath/xfilepath" - "github.com/bufbuild/buf/private/pkg/normalpath" - "github.com/bufbuild/buf/private/pkg/osext" - "github.com/bufbuild/buf/private/pkg/storage/storageos" - "github.com/bufbuild/buf/private/pkg/syserror" -) - -type cleaner struct { - storageosProvider storageos.Provider -} - -func newCleaner( - storageosProvider storageos.Provider, -) *cleaner { - return &cleaner{ - storageosProvider: storageosProvider, - } -} - -func (c *cleaner) DeleteOuts( - ctx context.Context, - pluginOuts []string, -) error { - pwd, err := osext.Getwd() - if err != nil { - return err - } - pwd, err = reallyCleanPath(pwd) - if err != nil { - return err - } - for _, pluginOut := range pluginOuts { - if err := validatePluginOut(pwd, pluginOut); err != nil { - return err - } - } - for _, pluginOut := range pluginOuts { - if err := c.deleteOut(ctx, pluginOut); err != nil { - return err - } - } - return nil -} - -func (c *cleaner) deleteOut( - ctx context.Context, - pluginOut string, -) error { - dirPath := pluginOut - removePath := "." - switch filepath.Ext(pluginOut) { - case ".jar", ".zip": - dirPath = normalpath.Dir(pluginOut) - removePath = normalpath.Base(pluginOut) - default: - // Assume output is a directory. - } - bucket, err := c.storageosProvider.NewReadWriteBucket( - dirPath, - storageos.ReadWriteBucketWithSymlinksIfSupported(), - ) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return nil - } - return err - } - return bucket.DeleteAll(ctx, removePath) -} - -func validatePluginOut(pwd string, pluginOut string) error { - if pluginOut == "" { - // This is just triple-making sure. - return syserror.New("got empty pluginOut in bufprotopluginos.Cleaner") - } - if pluginOut == "." { - // This is just a really defensive safety check. 
We can't see a reason you'd want to delete - // your current working directory other than something like a (cd proto && buf generate), so - // until and unless someone complains, we're just going to outlaw this. - return errors.New("cannot use --clean if your plugin will output to the current directory") - } - cleanedPluginOut, err := reallyCleanPath(pluginOut) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return nil - } - return err - } - if cleanedPluginOut == pwd { - // Same thing, more defense for now. - return errors.New("cannot use --clean if your plugin will output to the current directory") - } - return nil -} - -func reallyCleanPath(path string) (string, error) { - path, err := xfilepath.RealClean(path) - if err != nil { - return "", err - } - return filepath.EvalSymlinks(path) -} diff --git a/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer.go b/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer.go index ec4ff29360..1bdf0c5ddc 100644 --- a/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer.go +++ b/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer.go @@ -19,13 +19,16 @@ import ( "context" "errors" "fmt" + "io/fs" "log/slog" "os" "path/filepath" "sync" + "buf.build/go/standard/xpath/xfilepath" "github.com/bufbuild/buf/private/bufpkg/bufprotoplugin" "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/osext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storagearchive" "github.com/bufbuild/buf/private/pkg/storage/storagemem" @@ -34,6 +37,11 @@ import ( "google.golang.org/protobuf/types/pluginpb" ) +const ( + jarExt = ".jar" + zipExt = ".zip" +) + // Constants used to create .jar files. var ( manifestPath = normalpath.Join("META-INF", "MANIFEST.MF") @@ -49,6 +57,9 @@ type responseWriter struct { responseWriter bufprotoplugin.ResponseWriter // If set, create directories if they don't already exist. 
createOutDirIfNotExists bool + // If set, delete files from output directories that were not written + // during generation. + deleteOuts bool // Cache the readWriteBuckets by their respective output paths. // These builders are transformed to storage.ReadBuckets and written // to disk once the responseWriter is flushed. @@ -68,7 +79,7 @@ type responseWriter struct { // Cache the functions used to flush all of the responses to disk. // This holds all of the buckets in-memory so that we only write // the results to disk if all of the responses are successful. - closers []func() error + closers []func(ctx context.Context) error lock sync.RWMutex } @@ -86,6 +97,7 @@ func newResponseWriter( storageosProvider: storageosProvider, responseWriter: bufprotoplugin.NewResponseWriter(logger), createOutDirIfNotExists: responseWriterOptions.createOutDirIfNotExists, + deleteOuts: responseWriterOptions.deleteOuts, readWriteBuckets: make(map[string]storage.ReadWriteBucket), } } @@ -118,11 +130,11 @@ func (w *responseWriter) AddResponse( ) } -func (w *responseWriter) Close() error { +func (w *responseWriter) Close(ctx context.Context) error { w.lock.Lock() defer w.lock.Unlock() for _, closeFunc := range w.closers { - if err := closeFunc(); err != nil { + if err := closeFunc(ctx); err != nil { // Although unlikely, if an error happens here, // some generated files could be written to disk, // whereas others aren't. @@ -132,9 +144,38 @@ func (w *responseWriter) Close() error { return err } } + // Collect the set of generated paths per directory output before + // clearing state, so the delete phase knows which files to keep. 
+ var dirOutputPaths map[string]map[string]struct{} + if w.deleteOuts { + dirOutputPaths = make(map[string]map[string]struct{}, len(w.readWriteBuckets)) + for outPath, readWriteBucket := range w.readWriteBuckets { + if isArchivePath(outPath) { + continue + } + paths, err := storage.AllPaths(ctx, readWriteBucket, "") + if err != nil { + return err + } + pathSet := make(map[string]struct{}, len(paths)) + for _, path := range paths { + pathSet[path] = struct{}{} + } + dirOutputPaths[outPath] = pathSet + } + } // Re-initialize the cached values to be safe. w.readWriteBuckets = make(map[string]storage.ReadWriteBucket) w.closers = nil + if !w.deleteOuts { + return nil + } + // Delete stale files and remove empty directories. + for outDirPath, retainPaths := range dirOutputPaths { + if err := w.deleteStaleFilesAndEmptyDirs(ctx, outDirPath, retainPaths); err != nil { + return err + } + } return nil } @@ -144,8 +185,17 @@ func (w *responseWriter) addResponse( ctx context.Context, response *pluginpb.CodeGeneratorResponse, pluginOut string, createOutDirIfNotExists bool, ) error { + // Validate each output path the first time it is seen when deleteOuts is + // enabled, before committing to any destructive operations. 
+ if w.deleteOuts { + if _, seen := w.readWriteBuckets[pluginOut]; !seen { + if err := w.validateDeleteOutPath(pluginOut); err != nil { + return err + } + } + } switch filepath.Ext(pluginOut) { - case ".jar": + case jarExt: return w.writeZip( ctx, response, @@ -153,7 +203,7 @@ func (w *responseWriter) addResponse( true, createOutDirIfNotExists, ) - case ".zip": + case zipExt: return w.writeZip( ctx, response, @@ -177,7 +227,7 @@ func (w *responseWriter) writeZip( outFilePath string, includeManifest bool, createOutDirIfNotExists bool, -) (retErr error) { +) error { outDirPath := filepath.Dir(outFilePath) if readWriteBucket, ok := w.readWriteBuckets[outFilePath]; ok { // We already have a readWriteBucket for this outFilePath, so @@ -225,18 +275,26 @@ func (w *responseWriter) writeZip( // Add this readWriteBucket to the set so that other plugins // can write to the same files (re: insertion points). w.readWriteBuckets[outFilePath] = readWriteBucket - w.closers = append(w.closers, func() (retErr error) { - // We're done writing all of the content into this - // readWriteBucket, so we zip it when we flush. + w.closers = append(w.closers, func(ctx context.Context) error { + // Zip the generated content into a buffer so we can compare it with + // the existing file before deciding whether to write. This preserves + // the modification time when the output is unchanged. + var buf bytes.Buffer + // protoc does not compress. + if err := storagearchive.Zip(ctx, readWriteBucket, &buf, false); err != nil { + return err + } + newContent := buf.Bytes() + existingContent, err := os.ReadFile(outFilePath) + if err == nil && bytes.Equal(existingContent, newContent) { + return nil + } file, err := os.Create(outFilePath) if err != nil { return err } - defer func() { - retErr = errors.Join(retErr, file.Close()) - }() - // protoc does not compress. 
- return storagearchive.Zip(ctx, readWriteBucket, file, false) + _, writeErr := file.Write(newContent) + return errors.Join(writeErr, file.Close()) }) return nil } @@ -272,7 +330,7 @@ func (w *responseWriter) writeDirectory( // Add this readWriteBucket to the set so that other plugins // can write to the same files (re: insertion points). w.readWriteBuckets[outDirPath] = readWriteBucket - w.closers = append(w.closers, func() error { + w.closers = append(w.closers, func(ctx context.Context) error { if createOutDirIfNotExists { if err := os.MkdirAll(outDirPath, 0755); err != nil { return err @@ -325,10 +383,129 @@ func (w *responseWriter) copySkipUnchanged( return thread.Parallelize(ctx, jobs) } +// deleteStaleFilesAndEmptyDirs deletes files present in outDirPath that are +// not in retainPaths, then removes any directories that are now empty. +func (w *responseWriter) deleteStaleFilesAndEmptyDirs( + ctx context.Context, + outDirPath string, + retainPaths map[string]struct{}, +) error { + osReadWriteBucket, err := w.storageosProvider.NewReadWriteBucket( + outDirPath, + storageos.ReadWriteBucketWithSymlinksIfSupported(), + ) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // Output directory doesn't exist; nothing to delete. 
+ return nil + } + return err + } + existingPaths, err := storage.AllPaths(ctx, osReadWriteBucket, "") + if err != nil { + return err + } + var deleteJobs []func(context.Context) error + for _, existingPath := range existingPaths { + if _, ok := retainPaths[existingPath]; !ok { + deleteJobs = append(deleteJobs, func(ctx context.Context) error { + w.logger.DebugContext(ctx, "deleting stale generated file", slog.String("path", existingPath)) + if err := osReadWriteBucket.Delete(ctx, existingPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + return nil + }) + } + } + if err := thread.Parallelize(ctx, deleteJobs); err != nil { + return err + } + return removeEmptyDirs(outDirPath) +} + +// removeEmptyDirs recursively removes all empty directories under rootDir. +// It processes children before parents so that a chain of directories that +// are empty after their children are removed will be fully cleaned up. +// The rootDir itself is never removed. +// +// This operates directly on the filesystem because the storage abstraction +// only models files, not directories. +func removeEmptyDirs(rootDir string) error { + entries, err := os.ReadDir(rootDir) + if err != nil { + return err + } + for _, entry := range entries { + if entry.IsDir() { + childDir := filepath.Join(rootDir, entry.Name()) + if err := removeEmptyDirs(childDir); err != nil { + return err + } + // Re-check after recursing into children: the child directory + // may now be empty if all its contents were removed. + childEntries, err := os.ReadDir(childDir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + continue + } + return err + } + if len(childEntries) == 0 { + if err := os.Remove(childDir); err != nil && !os.IsNotExist(err) { + return err + } + } + } + } + return nil +} + +// validateDeleteOutPath checks that the output path is safe to delete from. 
+// It prevents accidentally deleting files from the current working directory, +// which could happen if a user configures out as ".". +// The path is already absolute (via filepath.Abs in AddResponse). +func (w *responseWriter) validateDeleteOutPath(absOutPath string) error { + pwd, err := osext.Getwd() + if err != nil { + return err + } + resolvedPwd, err := resolveCleanPath(pwd) + if err != nil { + return err + } + resolvedOut, err := resolveCleanPath(absOutPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + if resolvedOut == resolvedPwd { + return errors.New("cannot use --clean if your plugin will output to the current directory") + } + return nil +} + type responseWriterOptions struct { createOutDirIfNotExists bool + deleteOuts bool } func newResponseWriterOptions() *responseWriterOptions { return &responseWriterOptions{} } + +// resolveCleanPath returns the real, cleaned absolute path, following symlinks. +func resolveCleanPath(path string) (string, error) { + path, err := xfilepath.RealClean(path) + if err != nil { + return "", err + } + return filepath.EvalSymlinks(path) +} + +// isArchivePath returns true if the given path has a .zip or .jar extension. 
+func isArchivePath(path string) bool { + ext := filepath.Ext(path) + return ext == zipExt || ext == jarExt +} diff --git a/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer_test.go b/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer_test.go index 6126c9ac2a..bffdfc08ea 100644 --- a/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer_test.go +++ b/private/bufpkg/bufprotoplugin/bufprotopluginos/response_writer_test.go @@ -35,7 +35,7 @@ func TestResponseWriterSkipsUnchangedFile(t *testing.T) { past := time.Now().Add(-time.Hour) require.NoError(t, os.Chtimes(filePath, past, past)) - runResponseWriter(t, outDir, newResponseFile("foo.go", content)) + runResponseWriter(t, outDir, false, newResponseFile("foo.go", content)) info, err := os.Stat(filePath) require.NoError(t, err) @@ -51,7 +51,7 @@ func TestResponseWriterWritesChangedFile(t *testing.T) { require.NoError(t, os.Chtimes(filePath, past, past)) newContent := "package new\n" - runResponseWriter(t, outDir, newResponseFile("foo.go", newContent)) + runResponseWriter(t, outDir, false, newResponseFile("foo.go", newContent)) data, err := os.ReadFile(filePath) require.NoError(t, err) @@ -64,13 +64,12 @@ func TestResponseWriterWritesChangedFile(t *testing.T) { func TestResponseWriterWritesNewFile(t *testing.T) { t.Parallel() outDir := t.TempDir() - content := "package foo\n" - runResponseWriter(t, outDir, newResponseFile("foo.go", content)) + runResponseWriter(t, outDir, false, newResponseFile("foo.go", "package foo\n")) data, err := os.ReadFile(filepath.Join(outDir, "foo.go")) require.NoError(t, err) - require.Equal(t, content, string(data)) + require.Equal(t, "package foo\n", string(data)) } func TestResponseWriterMixedFiles(t *testing.T) { @@ -79,14 +78,13 @@ func TestResponseWriterMixedFiles(t *testing.T) { unchangedContent := "package unchanged\n" unchangedPath := filepath.Join(outDir, "unchanged.go") changedPath := filepath.Join(outDir, "changed.go") - newPath := 
filepath.Join(outDir, "new.go") require.NoError(t, os.WriteFile(unchangedPath, []byte(unchangedContent), 0600)) require.NoError(t, os.WriteFile(changedPath, []byte("package old\n"), 0600)) past := time.Now().Add(-time.Hour) require.NoError(t, os.Chtimes(unchangedPath, past, past)) require.NoError(t, os.Chtimes(changedPath, past, past)) - runResponseWriter(t, outDir, + runResponseWriter(t, outDir, false, newResponseFile("unchanged.go", unchangedContent), newResponseFile("changed.go", "package changed\n"), newResponseFile("new.go", "package new\n"), @@ -103,24 +101,231 @@ func TestResponseWriterMixedFiles(t *testing.T) { require.NoError(t, err) require.Greater(t, changedInfo.ModTime(), past) - newData, err := os.ReadFile(newPath) + newData, err := os.ReadFile(filepath.Join(outDir, "new.go")) require.NoError(t, err) require.Equal(t, "package new\n", string(newData)) } -func runResponseWriter(t *testing.T, outDir string, files ...*pluginpb.CodeGeneratorResponse_File) { - t.Helper() +func TestResponseWriterSmartCleanDeletesStaleFile(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + stalePath := filepath.Join(outDir, "stale.go") + require.NoError(t, os.WriteFile(stalePath, []byte("package stale\n"), 0600)) + + // Generate only foo.go; stale.go should be deleted. 
+ runResponseWriter(t, outDir, true, newResponseFile("foo.go", "package foo\n")) + + _, err := os.Stat(stalePath) + require.ErrorIs(t, err, os.ErrNotExist) + data, err := os.ReadFile(filepath.Join(outDir, "foo.go")) + require.NoError(t, err) + require.Equal(t, "package foo\n", string(data)) +} + +func TestResponseWriterSmartCleanPreservesMtimeForUnchanged(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + content := "package foo\n" + filePath := filepath.Join(outDir, "foo.go") + require.NoError(t, os.WriteFile(filePath, []byte(content), 0600)) + past := time.Now().Add(-time.Hour) + require.NoError(t, os.Chtimes(filePath, past, past)) + + runResponseWriter(t, outDir, true, newResponseFile("foo.go", content)) + + info, err := os.Stat(filePath) + require.NoError(t, err) + require.Equal(t, past.Truncate(time.Second), info.ModTime().Truncate(time.Second)) +} + +func TestResponseWriterSmartCleanMixedFiles(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + unchangedContent := "package unchanged\n" + unchangedPath := filepath.Join(outDir, "unchanged.go") + changedPath := filepath.Join(outDir, "changed.go") + stalePath := filepath.Join(outDir, "stale.go") + require.NoError(t, os.WriteFile(unchangedPath, []byte(unchangedContent), 0600)) + require.NoError(t, os.WriteFile(changedPath, []byte("package old\n"), 0600)) + require.NoError(t, os.WriteFile(stalePath, []byte("package stale\n"), 0600)) + past := time.Now().Add(-time.Hour) + require.NoError(t, os.Chtimes(unchangedPath, past, past)) + require.NoError(t, os.Chtimes(changedPath, past, past)) + + runResponseWriter(t, outDir, true, + newResponseFile("unchanged.go", unchangedContent), + newResponseFile("changed.go", "package changed\n"), + newResponseFile("new.go", "package new\n"), + ) + + // Unchanged: mtime preserved. 
+ unchangedInfo, err := os.Stat(unchangedPath) + require.NoError(t, err) + require.Equal(t, past.Truncate(time.Second), unchangedInfo.ModTime().Truncate(time.Second)) + + // Changed: new content, updated mtime. + changedData, err := os.ReadFile(changedPath) + require.NoError(t, err) + require.Equal(t, "package changed\n", string(changedData)) + changedInfo, err := os.Stat(changedPath) + require.NoError(t, err) + require.Greater(t, changedInfo.ModTime(), past) + + // New: created. + newData, err := os.ReadFile(filepath.Join(outDir, "new.go")) + require.NoError(t, err) + require.Equal(t, "package new\n", string(newData)) + + // Stale: deleted. + _, err = os.Stat(stalePath) + require.ErrorIs(t, err, os.ErrNotExist) +} + +func TestResponseWriterZipPreservesMtimeWhenUnchanged(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + outFile := filepath.Join(outDir, "output.zip") + + // First run creates the zip. + runResponseWriter(t, outFile, false, newResponseFile("foo.go", "package foo\n")) + require.FileExists(t, outFile) + + past := time.Now().Add(-time.Hour) + require.NoError(t, os.Chtimes(outFile, past, past)) + + // Second run with identical content should not rewrite the zip. + runResponseWriter(t, outFile, false, newResponseFile("foo.go", "package foo\n")) + + info, err := os.Stat(outFile) + require.NoError(t, err) + require.Equal(t, past.Truncate(time.Second), info.ModTime().Truncate(time.Second)) +} + +func TestResponseWriterZipUpdatesWhenChanged(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + outFile := filepath.Join(outDir, "output.zip") + + runResponseWriter(t, outFile, false, newResponseFile("foo.go", "package foo\n")) + + past := time.Now().Add(-time.Hour) + require.NoError(t, os.Chtimes(outFile, past, past)) + + // Second run with different content should rewrite. 
+ runResponseWriter(t, outFile, false, newResponseFile("foo.go", "package bar\n")) + + info, err := os.Stat(outFile) + require.NoError(t, err) + require.Greater(t, info.ModTime(), past) +} + +func TestResponseWriterSmartCleanRemovesEmptyDirs(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + subDir := filepath.Join(outDir, "subpkg") + require.NoError(t, os.MkdirAll(subDir, 0755)) + // Pre-existing file in a subdirectory that will become stale. + require.NoError(t, os.WriteFile(filepath.Join(subDir, "stale.go"), []byte("package stale\n"), 0600)) + + // Generate only to the root dir; nothing goes into subpkg. + runResponseWriter(t, outDir, true, newResponseFile("foo.go", "package foo\n")) + + // stale.go deleted, subpkg now empty and also removed. + _, err := os.Stat(subDir) + require.ErrorIs(t, err, os.ErrNotExist) +} + +func TestResponseWriterSmartCleanRemovesNestedEmptyDirs(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + // Create a/b/c/stale.go - all three directories should be removed once + // stale.go is deleted, because each parent becomes empty after its child + // is removed. + require.NoError(t, os.MkdirAll(filepath.Join(outDir, "a", "b", "c"), 0755)) + require.NoError(t, os.WriteFile(filepath.Join(outDir, "a", "b", "c", "stale.go"), []byte("package stale\n"), 0600)) + require.NoError(t, os.WriteFile(filepath.Join(outDir, "a", "b", "kept.go"), []byte("package kept\n"), 0600)) + // a/d is a pre-existing empty directory with no files. + require.NoError(t, os.MkdirAll(filepath.Join(outDir, "a", "d"), 0755)) + + runResponseWriter(t, outDir, true, + newResponseFile("foo.go", "package foo\n"), + newResponseFile("a/b/kept.go", "package kept\n"), + ) + + // a/b/c removed (stale file deleted, dir now empty). + _, err := os.Stat(filepath.Join(outDir, "a", "b", "c")) + require.ErrorIs(t, err, os.ErrNotExist) + // a/d removed (pre-existing empty directory). 
+ _, err = os.Stat(filepath.Join(outDir, "a", "d")) + require.ErrorIs(t, err, os.ErrNotExist) + // a/b still present because a/b/kept.go is generated output. + require.FileExists(t, filepath.Join(outDir, "a", "b", "kept.go")) + require.DirExists(t, filepath.Join(outDir, "a", "b")) + require.DirExists(t, filepath.Join(outDir, "a")) +} + +func TestResponseWriterSmartCleanMultiplePluginsSameOutDir(t *testing.T) { + t.Parallel() + outDir := t.TempDir() + // Pre-populate the output directory with a stale file that neither plugin will write. + stalePath := filepath.Join(outDir, "stale.go") + require.NoError(t, os.WriteFile(stalePath, []byte("package stale\n"), 0600)) + writer := NewResponseWriter( slogtestext.NewLogger(t), storageos.NewProvider(), ResponseWriterWithCreateOutDirIfNotExists(), + ResponseWriterWithDeleteOuts(), ) + // First plugin writes foo.go. require.NoError(t, writer.AddResponse( t.Context(), - &pluginpb.CodeGeneratorResponse{File: files}, + &pluginpb.CodeGeneratorResponse{File: []*pluginpb.CodeGeneratorResponse_File{ + newResponseFile("foo.go", "package foo\n"), + }}, outDir, )) - require.NoError(t, writer.Close()) + // Second plugin writes bar.go to the same directory. + require.NoError(t, writer.AddResponse( + t.Context(), + &pluginpb.CodeGeneratorResponse{File: []*pluginpb.CodeGeneratorResponse_File{ + newResponseFile("bar.go", "package bar\n"), + }}, + outDir, + )) + require.NoError(t, writer.Close(t.Context())) + + // Stale file must be deleted. + _, err := os.Stat(stalePath) + require.ErrorIs(t, err, os.ErrNotExist) + // Both plugin outputs must exist. 
+ fooData, err := os.ReadFile(filepath.Join(outDir, "foo.go")) + require.NoError(t, err) + require.Equal(t, "package foo\n", string(fooData)) + barData, err := os.ReadFile(filepath.Join(outDir, "bar.go")) + require.NoError(t, err) + require.Equal(t, "package bar\n", string(barData)) +} + +func runResponseWriter(t *testing.T, outPath string, deleteOuts bool, files ...*pluginpb.CodeGeneratorResponse_File) { + t.Helper() + opts := []ResponseWriterOption{ + ResponseWriterWithCreateOutDirIfNotExists(), + } + if deleteOuts { + opts = append(opts, ResponseWriterWithDeleteOuts()) + } + writer := NewResponseWriter( + slogtestext.NewLogger(t), + storageos.NewProvider(), + opts..., + ) + require.NoError(t, writer.AddResponse( + t.Context(), + &pluginpb.CodeGeneratorResponse{File: files}, + outPath, + )) + require.NoError(t, writer.Close(t.Context())) } func newResponseFile(name, content string) *pluginpb.CodeGeneratorResponse_File {