diff --git a/docs/notes/2.31.x.md b/docs/notes/2.31.x.md index 57ba60d9e7f..2f0bc2900d3 100644 --- a/docs/notes/2.31.x.md +++ b/docs/notes/2.31.x.md @@ -41,6 +41,7 @@ Java first party dependency inference logic [now](https://github.com/pantsbuild/ Updated to use Coursier v2.1.24 by default to pick up a bug fix allowing us to [simplify our code a bit](https://github.com/pantsbuild/pants/pull/22906). + #### Python A variety of Pex options to support building [native executables diff --git a/docs/notes/2.32.x.md b/docs/notes/2.32.x.md index a161ceeee9f..ff71cc7620e 100644 --- a/docs/notes/2.32.x.md +++ b/docs/notes/2.32.x.md @@ -28,6 +28,13 @@ The plugin API's `Get()` and `MultiGet()` constructs, deprecated in 2.30, are no ### Backends +#### Remote Execution + +Upgraded RE API to v2.2 + - Checked in the latest dump of proto files from the googleapis and bazelbuild_remote-apis repositories + - RE API v2.1 deprecates separate Command.output_files and Command.output_directories and unifies into single Command.output_paths field + - RE API v2.2 migrates Platform field from Command to Action + #### Docker The option `[docker].push_on_package` can be used to prevent Docker images from being pushed during packaging, i.e. when `--output` contains `push=True` or `type=registry`. 
diff --git a/src/rust/process_execution/remote/src/remote_cache.rs b/src/rust/process_execution/remote/src/remote_cache.rs index 5b71f95e864..1d575d0b0e0 100644 --- a/src/rust/process_execution/remote/src/remote_cache.rs +++ b/src/rust/process_execution/remote/src/remote_cache.rs @@ -183,11 +183,8 @@ impl CommandRunner { digest {trie_digest:?} contained a symlink instead.", trie_digest = root_trie.compute_root_digest(), )), - Some(directory::Entry::Directory(_)) => Err(format!( - "Declared output file path {file_path:?} in output \ - digest {trie_digest:?} contained a directory instead.", - trie_digest = root_trie.compute_root_digest(), - )), + // Return None for directories so they can be handled as output_directories + Some(directory::Entry::Directory(_)) => Ok(None), } } @@ -221,36 +218,32 @@ impl CommandRunner { digests.insert(result.stdout_digest); digests.insert(result.stderr_digest); - for output_directory in &command.output_directories { - let (tree, file_digests) = match Self::make_tree_for_output_directory( + // Use output_paths (v2.1+ API) instead of deprecated output_files/output_directories + for output_path in &command.output_paths { + // Check if this path is a file first (more common case) + if let Some(output_file) = Self::extract_output_file(&output_trie, output_path)? { + // It's a file + digests.insert(require_digest(output_file.digest.as_ref())?); + action_result.output_files.push(output_file); + } else if let Some((tree, file_digests)) = Self::make_tree_for_output_directory( &output_trie, - RelativePath::new(output_directory).unwrap(), + RelativePath::new(output_path).unwrap(), )? 
{ - Some(res) => res, - None => continue, - }; - - let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?; - digests.insert(tree_digest); - digests.extend(file_digests); - - action_result - .output_directories - .push(remexec::OutputDirectory { - path: output_directory.to_owned(), - tree_digest: Some(tree_digest.into()), - is_topologically_sorted: false, - }); - } - - for output_file_path in &command.output_files { - let output_file = match Self::extract_output_file(&output_trie, output_file_path)? { - Some(output_file) => output_file, - None => continue, - }; - - digests.insert(require_digest(output_file.digest.as_ref())?); - action_result.output_files.push(output_file); + // It's a directory + let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?; + digests.insert(tree_digest); + digests.extend(file_digests); + + action_result + .output_directories + .push(remexec::OutputDirectory { + path: output_path.to_owned(), + tree_digest: Some(tree_digest.into()), + is_topologically_sorted: false, + root_directory_digest: None, + }); + } + // If neither file nor directory, skip (path doesn't exist in output) } Ok((action_result, digests.into_iter().collect::>())) diff --git a/src/rust/process_execution/remote/src/remote_cache_tests.rs b/src/rust/process_execution/remote/src/remote_cache_tests.rs index 155d6059d4b..c449dd1bd12 100644 --- a/src/rust/process_execution/remote/src/remote_cache_tests.rs +++ b/src/rust/process_execution/remote/src/remote_cache_tests.rs @@ -1,5 +1,7 @@ // Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). // Licensed under the Apache License, Version 2.0 (see LICENSE). +#![allow(deprecated)] + use std::collections::{BTreeMap, HashSet}; use std::convert::TryInto; use std::sync::Arc; @@ -702,13 +704,11 @@ async fn extract_output_file() { .is_none() ); - // Error if a path has been declared as a file but isn't. 
- assert_eq!( - crate::remote_cache::CommandRunner::extract_output_file(&input_tree.digest_trie(), "cats",), - Err(format!( - "Declared output file path \"cats\" in output digest {:?} contained a directory instead.", - TestDirectory::nested().digest() - )) + // When a directory is encountered, return None so it can be handled as output_directories + assert!( + crate::remote_cache::CommandRunner::extract_output_file(&input_tree.digest_trie(), "cats",) + .unwrap() + .is_none() ); } @@ -792,8 +792,7 @@ async fn make_action_result_basic() { let command = remexec::Command { arguments: vec!["this is a test".into()], - output_files: vec!["pets/cats/roland.ext".into()], - output_directories: vec!["pets/cats".into()], + output_paths: vec!["pets/cats/roland.ext".into(), "pets/cats".into()], ..Default::default() }; @@ -846,6 +845,7 @@ async fn make_action_result_basic() { path: "pets/cats".to_owned(), tree_digest: Some(TestTree::roland_at_root().digest().into()), is_topologically_sorted: false, + root_directory_digest: None, } ); diff --git a/src/rust/process_execution/remote/src/remote_tests.rs b/src/rust/process_execution/remote/src/remote_tests.rs index 2d352cd0e20..9d2bfdbd7bd 100644 --- a/src/rust/process_execution/remote/src/remote_tests.rs +++ b/src/rust/process_execution/remote/src/remote_tests.rs @@ -1,5 +1,7 @@ // Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). // Licensed under the Apache License, Version 2.0 (see LICENSE). 
+#![allow(deprecated)] + use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::path::{Path, PathBuf}; use std::time::Duration; @@ -124,9 +126,11 @@ async fn make_execute_request() { value: "value".to_owned(), }, ], - output_files: vec!["other/file.ext".to_owned(), "path/to/file.ext".to_owned()], - output_directories: vec!["directory/name".to_owned()], - platform: Some(remexec::Platform::default()), + output_paths: vec![ + "directory/name".to_owned(), + "other/file.ext".to_owned(), + "path/to/file.ext".to_owned(), + ], ..Default::default() }; @@ -134,14 +138,15 @@ async fn make_execute_request() { command_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "d7b7538a7a57a2b04da51ffffff758036f43ebb92d37b66bd1bb8c6af0030e57", + "3152fb71b660cc9c92fc0494435de7f666b10a55e84af918f4e6c61b22baecd9", ) .unwrap(), - 187, + 185, )) .into(), ), input_root_digest: Some((&input_directory.digest()).into()), + platform: Some(remexec::Platform::default()), ..Default::default() }; @@ -149,10 +154,10 @@ async fn make_execute_request() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "16bf057effe6d18553979a069228f0da81df307c964ea0f162bb60e31070bb27", + "71d5304c409027c67699d5282aa2b2407fd487fc2d3e928d622368bd14b9b535", ) .unwrap(), - 141, + 143, )) .into(), ), @@ -226,14 +231,11 @@ async fn make_execute_request_with_instance_name() { value: "value".to_owned(), }, ], - output_files: vec!["other/file.ext".to_owned(), "path/to/file.ext".to_owned()], - output_directories: vec!["directory/name".to_owned()], - platform: Some(remexec::Platform { - properties: vec![remexec::platform::Property { - name: "target_platform".to_owned(), - value: "apple-2e".to_owned(), - }], - }), + output_paths: vec![ + "directory/name".to_owned(), + "other/file.ext".to_owned(), + "path/to/file.ext".to_owned(), + ], ..Default::default() }; @@ -241,14 +243,20 @@ async fn make_execute_request_with_instance_name() { command_digest: Some( (&Digest::new( 
Fingerprint::from_hex_string( - "9f8a65e780495003c341923b62a06ae6796dcad47e396dc89704b10bc26e1729", + "3152fb71b660cc9c92fc0494435de7f666b10a55e84af918f4e6c61b22baecd9", ) .unwrap(), - 216, + 185, )) .into(), ), input_root_digest: Some((&input_directory.digest()).into()), + platform: Some(remexec::Platform { + properties: vec![remexec::platform::Property { + name: "target_platform".to_owned(), + value: "apple-2e".to_owned(), + }], + }), ..Default::default() }; @@ -257,10 +265,10 @@ async fn make_execute_request_with_instance_name() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "5b017857389d245cd0663105f3b8ee47bb7412940e4859098c8af46bdd21c8b6", + "dc65bb3934a5baf532076a4feac017ec6515183d1250d28086bd4086cbbc8b28", ) .unwrap(), - 141, + 172, )) .into(), ), @@ -337,9 +345,11 @@ async fn make_execute_request_with_cache_key_gen_version() { value: "value".to_owned(), }, ], - output_files: vec!["other/file.ext".to_owned(), "path/to/file.ext".to_owned()], - output_directories: vec!["directory/name".to_owned()], - platform: Some(remexec::Platform::default()), + output_paths: vec![ + "directory/name".to_owned(), + "other/file.ext".to_owned(), + "path/to/file.ext".to_owned(), + ], ..Default::default() }; want_command @@ -350,14 +360,15 @@ async fn make_execute_request_with_cache_key_gen_version() { command_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "04ed10b1ddac69249ad1ca463fd4284c4f9c0115a2f2aaf1fd8a9ce6571ee29c", + "01fa005270a638d4fd098ce95d0e02d56336954179349b182fb09b41f65103ec", ) .unwrap(), - 224, + 222, )) .into(), ), input_root_digest: Some((&input_directory.digest()).into()), + platform: Some(remexec::Platform::default()), ..Default::default() }; @@ -365,10 +376,10 @@ async fn make_execute_request_with_cache_key_gen_version() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "e55329e2c0413a6def422752f9e964204e7e40ec81e2867a6222a43727ba29d1", + 
"f4183da326d105472405d62397f91e4720aef220bcb36ecd7bb43f03a481d340", ) .unwrap(), - 141, + 143, )) .into(), ), @@ -413,12 +424,6 @@ async fn make_execute_request_with_jdk() { value: "linux_x86_64".to_owned(), }, ], - platform: Some(remexec::Platform { - properties: vec![remexec::platform::Property { - name: "JDK_SYMLINK".to_owned(), - value: ".jdk".to_owned(), - }], - }), ..Default::default() }; @@ -426,14 +431,20 @@ async fn make_execute_request_with_jdk() { command_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "45e72f32f1d935e02732e26a8aaec041877811a9b7fe66816ace7b570173953e", + "d825e7cf83e2c16e867709013f1c907f00ed6a4d99bba75b1d48dfecab446a29", ) .unwrap(), - 142, + 119, )) .into(), ), input_root_digest: Some((&input_directory.digest()).into()), + platform: Some(remexec::Platform { + properties: vec![remexec::platform::Property { + name: "JDK_SYMLINK".to_owned(), + value: ".jdk".to_owned(), + }], + }), ..Default::default() }; @@ -441,10 +452,10 @@ async fn make_execute_request_with_jdk() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "2868a54befe3ad9d8fd2ac30c2a170ac890715ec0b196ab8259e8b6beabf7d1c", + "2954ca7842d8885f5bb261e100894a7fcc649cffa09c240f181a9c114530b8ce", ) .unwrap(), - 141, + 163, )) .into(), ), @@ -493,6 +504,21 @@ async fn make_execute_request_with_jdk_and_extra_platform_properties() { value: "linux_x86_64".to_owned(), }, ], + ..Default::default() + }; + + let want_action = remexec::Action { + command_digest: Some( + (&Digest::new( + Fingerprint::from_hex_string( + "8d9609abb66f4fd6d554b198c30f6393974dca1528de8911b93070184ce5c264", + ) + .unwrap(), + 120, + )) + .into(), + ), + input_root_digest: Some((&input_directory.digest()).into()), platform: Some(remexec::Platform { properties: vec![ remexec::platform::Property { @@ -520,29 +546,14 @@ async fn make_execute_request_with_jdk_and_extra_platform_properties() { ..Default::default() }; - let want_action = remexec::Action { - command_digest: Some( - 
(&Digest::new( - Fingerprint::from_hex_string( - "03b368b6f449438938636f57fbaf6b6e2a1eb776583b5197c1320b646ee8d64a", - ) - .unwrap(), - 198, - )) - .into(), - ), - input_root_digest: Some((&input_directory.digest()).into()), - ..Default::default() - }; - let want_execute_request = remexec::ExecuteRequest { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "0291bda0da047d715b6da33c1e4c2a74679ab06c95a32424ea754f70be5242ed", + "c4498ce03ed0b5826daf66561041b9ed2df6f794abd09b19066ed29cbb2bea9a", ) .unwrap(), - 141, + 218, )) .into(), ), @@ -608,9 +619,11 @@ async fn make_execute_request_with_timeout() { value: "value".to_owned(), }, ], - output_files: vec!["other/file.ext".to_owned(), "path/to/file.ext".to_owned()], - output_directories: vec!["directory/name".to_owned()], - platform: Some(remexec::Platform::default()), + output_paths: vec![ + "directory/name".to_owned(), + "other/file.ext".to_owned(), + "path/to/file.ext".to_owned(), + ], ..Default::default() }; @@ -618,15 +631,16 @@ async fn make_execute_request_with_timeout() { command_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "d7b7538a7a57a2b04da51ffffff758036f43ebb92d37b66bd1bb8c6af0030e57", + "3152fb71b660cc9c92fc0494435de7f666b10a55e84af918f4e6c61b22baecd9", ) .unwrap(), - 187, + 185, )) .into(), ), input_root_digest: Some((&input_directory.digest()).into()), timeout: Some(prost_types::Duration::try_from(Duration::from_secs(1)).unwrap()), + platform: Some(remexec::Platform::default()), ..Default::default() }; @@ -634,10 +648,10 @@ async fn make_execute_request_with_timeout() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "6e3666265a4ef89ddf26a406516484429b2d8e744fbae6b36a66c6853407626a", + "8931d71fd6d605078f7348f0af3fe4f5e93e979493e15fe7cb895a93949c17ef", ) .unwrap(), - 145, + 147, )) .into(), ), @@ -713,7 +727,6 @@ async fn make_execute_request_with_append_only_caches() { value: "value".to_owned(), }, ], - platform: Some(remexec::Platform::default()), 
..Default::default() }; @@ -721,10 +734,10 @@ async fn make_execute_request_with_append_only_caches() { command_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "1deb19eddcefd5074263064a7df2a19caeb4e6d86a849bc07e23a5d856f886ec", + "c3dd90cef0e9e849b9135d66a35e9a68802041dd8184b72b1a658d8a65de9753", ) .unwrap(), - 178, + 176, )) .into(), ), @@ -739,6 +752,7 @@ async fn make_execute_request_with_append_only_caches() { .into(), ), timeout: Some(prost_types::Duration::try_from(Duration::from_secs(1)).unwrap()), + platform: Some(remexec::Platform::default()), ..Default::default() }; @@ -746,10 +760,10 @@ async fn make_execute_request_with_append_only_caches() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "798ac2aaf68b36d571fcb90719e8dafdf5929081c491f178ac7f4663b591183e", + "dd0e3a5b0e6fe5dff28c75febc097eff59d8e6f240eb3601d8854ab165c6d5e7", ) .unwrap(), - 146, + 148, )) .into(), ), @@ -870,9 +884,11 @@ async fn make_execute_request_using_immutable_inputs() { value: "value".to_owned(), }, ], - output_files: vec!["other/file.ext".to_owned(), "path/to/file.ext".to_owned()], - output_directories: vec!["directory/name".to_owned()], - platform: Some(remexec::Platform::default()), + output_paths: vec![ + "directory/name".to_owned(), + "other/file.ext".to_owned(), + "path/to/file.ext".to_owned(), + ], ..Default::default() }; @@ -880,14 +896,15 @@ async fn make_execute_request_using_immutable_inputs() { command_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - "d7b7538a7a57a2b04da51ffffff758036f43ebb92d37b66bd1bb8c6af0030e57", + "3152fb71b660cc9c92fc0494435de7f666b10a55e84af918f4e6c61b22baecd9", ) .unwrap(), - 187, + 185, )) .into(), ), input_root_digest: Some((&expected_digest.as_digest()).into()), + platform: Some(remexec::Platform::default()), ..Default::default() }; @@ -895,10 +912,10 @@ async fn make_execute_request_using_immutable_inputs() { action_digest: Some( (&Digest::new( Fingerprint::from_hex_string( - 
"2c1eae75a54d2464ac63ba51587deb3986f15c3966c61f77fb9b06b195f4127a", + "077171b3ce37e9979aa14236e589dffec075d9960290278abba56b2b2467b5e1", ) .unwrap(), - 141, + 143, )) .into(), ), @@ -2339,6 +2356,7 @@ async fn extract_output_files_from_response_just_directory() { path: "cats".into(), tree_digest: Some(test_tree.digest().into()), is_topologically_sorted: false, + root_directory_digest: None, }], ..Default::default() }), @@ -2371,11 +2389,13 @@ async fn extract_output_files_from_response_directories_and_files() { path: "pets/cats".into(), tree_digest: Some((&TestTree::roland_at_root().digest()).into()), is_topologically_sorted: false, + root_directory_digest: None, }, remexec::OutputDirectory { path: "pets/dogs".into(), tree_digest: Some((&TestTree::robin_at_root().digest()).into()), is_topologically_sorted: false, + root_directory_digest: None, }, ], ..Default::default() @@ -2405,6 +2425,7 @@ async fn extract_output_files_from_response_no_prefix() { path: String::new(), tree_digest: Some((&TestTree::roland_at_root().digest()).into()), is_topologically_sorted: false, + root_directory_digest: None, }], ..Default::default() }), diff --git a/src/rust/process_execution/src/cache.rs b/src/rust/process_execution/src/cache.rs index 59679f0fb80..ede6b5ae179 100644 --- a/src/rust/process_execution/src/cache.rs +++ b/src/rust/process_execution/src/cache.rs @@ -271,6 +271,7 @@ impl CommandRunner { path: String::new(), tree_digest: Some((&result.output_directory.as_digest()).into()), is_topologically_sorted: false, + root_directory_digest: None, }], stdout_digest: Some((&stdout_digest).into()), stderr_digest: Some((&stderr_digest).into()), diff --git a/src/rust/process_execution/src/lib.rs b/src/rust/process_execution/src/lib.rs index bc06555017d..ed21f2856a7 100644 --- a/src/rust/process_execution/src/lib.rs +++ b/src/rust/process_execution/src/lib.rs @@ -1397,29 +1397,28 @@ pub async fn make_execute_request( }); } - let mut output_files = req - .output_files - .iter() - 
.map(|p| { + // Combine output_files and output_directories into output_paths (v2.1+ API) + let mut output_paths = Vec::new(); + + for p in &req.output_files { + output_paths.push( p.to_str() .map(str::to_owned) - .ok_or_else(|| format!("Non-UTF8 output file path: {p:?}")) - }) - .collect::, String>>()?; - output_files.sort(); - command.output_files = output_files; + .ok_or_else(|| format!("Non-UTF8 output file path: {p:?}"))?, + ); + } - let mut output_directories = req - .output_directories - .iter() - .map(|p| { + for p in &req.output_directories { + output_paths.push( p.to_str() .map(str::to_owned) - .ok_or_else(|| format!("Non-UTF8 output directory path: {p:?}")) - }) - .collect::, String>>()?; - output_directories.sort(); - command.output_directories = output_directories; + .ok_or_else(|| format!("Non-UTF8 output directory path: {p:?}"))?, + ); + } + + output_paths.sort(); + output_paths.dedup(); // Deduplicate as required by the spec + command.output_paths = output_paths; if let Some(working_directory) = &req.working_directory { // Do not set `working_directory` if a wrapper script is in use because the wrapper script @@ -1444,12 +1443,12 @@ pub async fn make_execute_request( platform_properties.push(("JDK_SYMLINK".to_owned(), ".jdk".to_owned())); } - // Extract `Platform` proto from the `Command` to avoid a partial move of `Command`. - let mut command_platform = command.platform.take().unwrap_or_default(); + // Build the Platform proto (will be set on Action, not Command, per v2.2+ API) + let mut action_platform = remexec::Platform::default(); // Add configured platform properties to the `Platform`. for (name, value) in platform_properties { - command_platform + action_platform .properties .push(remexec::platform::Property { name: name.clone(), @@ -1466,16 +1465,13 @@ pub async fn make_execute_request( // is done by code point, equivalently, by the UTF-8 bytes. // // Note: BuildBarn enforces this requirement. 
- command_platform + action_platform .properties .sort_by(|x, y| match x.name.cmp(&y.name) { Ordering::Equal => x.value.cmp(&y.value), v => v, }); - // Store the separate copy back into the Command proto. - command.platform = Some(command_platform); - // Sort the environment variables. REv2 spec requires sorting by name for same reasons that // platform properties are sorted, i.e. consistent hashing. command @@ -1499,6 +1495,7 @@ pub async fn make_execute_request( let mut action = remexec::Action { command_digest: Some((&digest(&command)?).into()), input_root_digest: Some(input_root_digest.as_digest().into()), + platform: Some(action_platform), ..remexec::Action::default() }; diff --git a/src/rust/process_executor/src/main.rs b/src/rust/process_executor/src/main.rs index c6ee0b96abb..a641edc06fb 100644 --- a/src/rust/process_executor/src/main.rs +++ b/src/rust/process_executor/src/main.rs @@ -477,6 +477,32 @@ async fn extract_request_from_action_digest( .await .map_err(|e| e.to_string())?; + // Use output_paths if available, otherwise fall back to deprecated output_files/output_directories + let (output_files, output_directories) = if command.output_paths.is_empty() { + // Fall back to deprecated fields for backwards compatibility + #[allow(deprecated)] + let files = command + .output_files + .iter() + .map(RelativePath::new) + .collect::>()?; + #[allow(deprecated)] + let dirs = command + .output_directories + .iter() + .map(RelativePath::new) + .collect::>()?; + (files, dirs) + } else { + // Use the new output_paths field (v2.1+) + let all_paths: BTreeSet = command + .output_paths + .iter() + .map(RelativePath::new) + .collect::>()?; + (all_paths.clone(), all_paths) + }; + let process = process_execution::Process { argv: command.arguments, env: command @@ -491,16 +517,8 @@ async fn extract_request_from_action_digest( .collect(), working_directory, input_digests, - output_files: command - .output_files - .iter() - .map(RelativePath::new) - .collect::>()?, - 
output_directories: command - .output_directories - .iter() - .map(RelativePath::new) - .collect::>()?, + output_files, + output_directories, timeout: action.timeout.map(|timeout| { Duration::from_nanos(timeout.nanos as u64 + timeout.seconds as u64 * 1000000000) }), diff --git a/src/rust/protos/protos/bazelbuild_remote-apis/README.md b/src/rust/protos/protos/bazelbuild_remote-apis/README.md index 9f88b4a7e35..15685b12f40 100644 --- a/src/rust/protos/protos/bazelbuild_remote-apis/README.md +++ b/src/rust/protos/protos/bazelbuild_remote-apis/README.md @@ -1,3 +1,3 @@ This is a dump of the .proto files from https://github.com/bazelbuild/remote-apis directory build. -This dump was taken at git sha 0afc3700d177bb37ed48438fb50d8bc7f4872874 +This dump was taken at git sha 3051389c06348307437e92e3a1d3c6d6566094b4 diff --git a/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/asset/v1/remote_asset.proto b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/asset/v1/remote_asset.proto new file mode 100644 index 00000000000..6581f055505 --- /dev/null +++ b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/asset/v1/remote_asset.proto @@ -0,0 +1,495 @@ +// Copyright 2020 The Bazel Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package build.bazel.remote.asset.v1; + +import "build/bazel/remote/execution/v2/remote_execution.proto"; +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Build.Bazel.Remote.Asset.v1"; +option go_package = "github.com/bazelbuild/remote-apis/build/bazel/remote/asset/v1;remoteasset"; +option java_multiple_files = true; +option java_outer_classname = "RemoteAssetProto"; +option java_package = "build.bazel.remote.asset.v1"; +option objc_class_prefix = "RA"; + +// The Remote Asset API provides a mapping from a URI and Qualifiers to +// Digests. +// +// Multiple URIs may be used to refer to the same content. For example, the +// same tarball may exist at multiple mirrors and thus be retrievable from +// multiple URLs. When URLs are used, these should refer to actual content as +// Fetch service implementations may choose to fetch the content directly +// from the origin. For example, the HEAD of a git repository's active branch +// can be referred to as: +// +// uri: https://github.com/bazelbuild/remote-apis.git +// +// URNs may be used to strongly identify content, for instance by using the +// uuid namespace identifier: urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6. +// This is most applicable to named content that is Push'd, where the URN +// serves as an agreed-upon key, but carries no other inherent meaning. +// +// Service implementations may choose to support only URLs, only URNs for +// Push'd content, only other URIs for which the server and client agree upon +// semantics of, or any mixture of the above. + +// Qualifiers are used to disambiguate or sub-select content that shares a URI. +// This may include specifying a particular commit or branch, in the case of +// URIs referencing a repository; they could also be used to specify a +// particular subdirectory of a repository or tarball. 
Qualifiers may also be +// used to ensure content matches what the client expects, even when there is +// no ambiguity to be had - for example, a qualifier specifying a checksum +// value. +// +// In cases where the semantics of the request are not immediately clear from +// the URL and/or qualifiers - e.g. dictated by URL scheme - it is recommended +// to use an additional qualifier to remove the ambiguity. The `resource_type` +// qualifier is recommended for this purpose. +// +// Qualifiers may be supplied in any order. +message Qualifier { + // The "name" of the qualifier, for example "resource_type". + // No separation is made between 'standard' and 'nonstandard' + // qualifiers, in accordance with https://tools.ietf.org/html/rfc6648, + // however implementers *SHOULD* take care to avoid ambiguity. + string name = 1; + + // The "value" of the qualifier. Semantics will be dictated by the name. + string value = 2; +} + +// The Fetch service resolves or fetches assets referenced by URI and +// Qualifiers, returning a Digest for the content in +// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. +// +// As with other services in the Remote Execution API, any call may return an +// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing +// information about when the client should retry the request; clients SHOULD +// respect the information provided. +service Fetch { + // Resolve or fetch referenced assets, making them available to the caller and + // other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. + // + // Servers *MAY* fetch content that they do not already have cached, for any + // URLs they support. + // + // Servers *SHOULD* ensure that referenced files are present in the CAS at the + // time of the response, and (if supported) that they will remain available + // for a reasonable period of time. 
The lifetimes of the referenced blobs *SHOULD* + // be increased if necessary and applicable. + // In the event that a client receives a reference to content that is no + // longer present, it *MAY* re-issue the request with + // `oldest_content_accepted` set to a more recent timestamp than the original + // attempt, to induce a re-fetch from origin. + // + // Servers *MAY* cache fetched content and reuse it for subsequent requests, + // subject to `oldest_content_accepted`. + // + // Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push] + // API and allow content to be directly inserted for use in future fetch + // responses. + // + // Servers *MUST* ensure Fetch'd content matches all the specified + // qualifiers except in the case of previously Push'd resources, for which + // the server *MAY* trust the pushing client to have set the qualifiers + // correctly, without validation. + // + // Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push] + // API *MUST* reject requests containing qualifiers it does not support. + // + // Servers *MAY* transform assets as part of the fetch. For example a + // tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory] + // might be unpacked, or a Git repository + // fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob] + // might be passed through `git-archive`. + // + // Errors handling the requested assets will be returned as gRPC Status errors + // here; errors outside the server's control will be returned inline in the + // `status` field of the response (see comment there for details). + // The possible RPC errors include: + // * `INVALID_ARGUMENT`: One or more arguments were invalid, such as a + // qualifier that is not supported by the server. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to + // perform the requested operation. The client may retry after a delay. 
+ // * `UNAVAILABLE`: Due to a transient condition the operation could not be + // completed. The client should retry. + // * `INTERNAL`: An internal error occurred while performing the operation. + // The client should retry. + // * `DEADLINE_EXCEEDED`: The fetch could not be completed within the given + // RPC deadline. The client should retry for at least as long as the value + // provided in `timeout` field of the request. + // + // In the case of unsupported qualifiers, the server *SHOULD* additionally + // send a [BadRequest][google.rpc.BadRequest] error detail where, for each + // unsupported qualifier, there is a `FieldViolation` with a `field` of + // `qualifiers.name` and a `description` of `"{qualifier}" not supported` + // indicating the name of the unsupported qualifier. + rpc FetchBlob(FetchBlobRequest) returns (FetchBlobResponse) { + option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchBlob" body: "*" }; + } + rpc FetchDirectory(FetchDirectoryRequest) returns (FetchDirectoryResponse) { + option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchDirectory" body: "*" }; + } +} + +// A request message for +// [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]. +message FetchBlobRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // The timeout for the underlying fetch, if content needs to be retrieved from + // origin. + // + // If unset, the server *MAY* apply an implementation-defined timeout. + // + // If set, and the user-provided timeout exceeds the RPC deadline, the server + // *SHOULD* keep the fetch going after the RPC completes, to be made + // available for future Fetch calls. 
The server may also enforce (via clamping + // and/or an INVALID_ARGUMENT error) implementation-defined minimum and + // maximum timeout values. + // + // If this timeout is exceeded on an attempt to retrieve content from origin + // the client will receive DEADLINE_EXCEEDED in [FetchBlobResponse.status]. + google.protobuf.Duration timeout = 2; + + // The oldest content the client is willing to accept, as measured from the + // time it was Push'd or when the underlying retrieval from origin was + // started. + // Upon retries of Fetch requests that cannot be completed within a single + // RPC, clients *SHOULD* provide the same value for subsequent requests as the + // original, to simplify combining the request with the previous attempt. + // + // If unset, the client *SHOULD* accept content of any age. + google.protobuf.Timestamp oldest_content_accepted = 3; + + // The URI(s) of the content to fetch. These may be resources that the server + // can directly fetch from origin, in which case multiple URIs *SHOULD* + // represent the same content available at different locations (such as an + // origin and secondary mirrors). These may also be URIs for content known to + // the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push] + // service. + // + // Clients *MUST* supply at least one URI. Servers *MAY* match any one of the + // supplied URIs. + repeated string uris = 4; + + // Qualifiers sub-specifying the content to fetch - see comments on + // [Qualifier][build.bazel.remote.asset.v1.Qualifier]. + // The same qualifiers apply to all URIs. + // + // Specified qualifier names *MUST* be unique. + repeated Qualifier qualifiers = 5; + + // The digest function the server must use to compute the digest. + // + // If unset, the server SHOULD default to SHA256. 
+ build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6; +} + +// A response message for +// [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]. +message FetchBlobResponse { + // If the status has a code other than `OK`, it indicates that the operation + // was unable to be completed for reasons outside the servers' control. + // The possible fetch errors include: + // * `DEADLINE_EXCEEDED`: The operation could not be completed within the + // specified timeout. + // * `NOT_FOUND`: The requested asset was not found at the specified location. + // * `PERMISSION_DENIED`: The request was rejected by a remote server, or + // requested an asset from a disallowed origin. + // * `ABORTED`: The operation could not be completed, typically due to a + // failed consistency check. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to + // perform the requested operation. The client may retry after a delay. + google.rpc.Status status = 1; + + // The uri from the request that resulted in a successful retrieval, or from + // which the error indicated in `status` was obtained. + string uri = 2; + + // Any qualifiers known to the server and of interest to clients. + repeated Qualifier qualifiers = 3; + + // A minimum timestamp the content is expected to be available through. + // Servers *MAY* omit this field, if not known with confidence. + google.protobuf.Timestamp expires_at = 4; + + // The result of the fetch, if the status had code `OK`. + // The digest of the file's contents, available for download through the CAS. + build.bazel.remote.execution.v2.Digest blob_digest = 5; + + // This field SHOULD be set to the digest function that was used by the server + // to compute [FetchBlobResponse.blob_digest]. + // Clients could use this to determine whether the server honors + // [FetchBlobRequest.digest_function] that was set in the request. 
+ // + // If unset, clients SHOULD default to use SHA256 regardless of the requested + // [FetchBlobRequest.digest_function]. + build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6; +} + +// A request message for +// [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]. +message FetchDirectoryRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // The timeout for the underlying fetch, if content needs to be retrieved from + // origin. This value is allowed to exceed the RPC deadline, in which case the + // server *SHOULD* keep the fetch going after the RPC completes, to be made + // available for future Fetch calls. + // + // If this timeout is exceeded on an attempt to retrieve content from origin + // the client will receive DEADLINE_EXCEEDED in [FetchDirectoryResponse.status]. + google.protobuf.Duration timeout = 2; + + // The oldest content the client is willing to accept, as measured from the + // time it was Push'd or when the underlying retrieval from origin was + // started. + // Upon retries of Fetch requests that cannot be completed within a single + // RPC, clients *SHOULD* provide the same value for subsequent requests as the + // original, to simplify combining the request with the previous attempt. + // + // If unset, the client *SHOULD* accept content of any age. + google.protobuf.Timestamp oldest_content_accepted = 3; + + // The URI(s) of the content to fetch. These may be resources that the server + // can directly fetch from origin, in which case multiple URIs *SHOULD* + // represent the same content available at different locations (such as an + // origin and secondary mirrors). 
These may also be URIs for content known to + // the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push] + // service. + // + // Clients *MUST* supply at least one URI. Servers *MAY* match any one of the + // supplied URIs. + repeated string uris = 4; + + // Qualifiers sub-specifying the content to fetch - see comments on + // [Qualifier][build.bazel.remote.asset.v1.Qualifier]. + // The same qualifiers apply to all URIs. + // + // Specified qualifier names *MUST* be unique. + repeated Qualifier qualifiers = 5; + + // The digest function the server must use to compute the digest. + // + // If unset, the server SHOULD default to SHA256. + build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6; +} + +// A response message for +// [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]. +message FetchDirectoryResponse { + // If the status has a code other than `OK`, it indicates that the operation + // was unable to be completed for reasons outside the servers' control. + // The possible fetch errors include: + // * `DEADLINE_EXCEEDED`: The operation could not be completed within the + // specified timeout. + // * `NOT_FOUND`: The requested asset was not found at the specified location. + // * `PERMISSION_DENIED`: The request was rejected by a remote server, or + // requested an asset from a disallowed origin. + // * `ABORTED`: The operation could not be completed, typically due to a + // failed consistency check. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to + // perform the requested operation. The client may retry after a delay. + google.rpc.Status status = 1; + + // The uri from the request that resulted in a successful retrieval, or from + // which the error indicated in `status` was obtained. + string uri = 2; + + // Any qualifiers known to the server and of interest to clients. 
+  repeated Qualifier qualifiers = 3;
+
+  // A minimum timestamp the content is expected to be available through.
+  // Servers *MAY* omit this field, if not known with confidence.
+  google.protobuf.Timestamp expires_at = 4;
+
+  // The result of the fetch, if the status had code `OK`.
+  // the root digest of a directory tree, suitable for fetching via
+  // [ContentAddressableStorage.GetTree].
+  build.bazel.remote.execution.v2.Digest root_directory_digest = 5;
+
+  // This field SHOULD be set to the digest function that was used by the server
+  // to compute [FetchDirectoryResponse.root_directory_digest].
+  // Clients could use this to determine whether the server honors
+  // [FetchDirectoryRequest.digest_function] that was set in the request.
+  //
+  // If unset, clients SHOULD default to use SHA256 regardless of the requested
+  // [FetchDirectoryRequest.digest_function].
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6;
+}
+
+// The Push service is complementary to the Fetch, and allows for
+// associating contents of URLs to be returned in future Fetch API calls.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Push {
+  // These APIs associate the identifying information of a resource, as
+  // indicated by URI and optionally Qualifiers, with content available in the
+  // CAS. For example, associating a repository url and a commit id with a
+  // Directory Digest.
+  //
+  // Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
+  // only allow certain URIs to be pushed.
+  //
+  // Clients *MUST* ensure associated content is available in CAS prior to
+  // pushing.
+ // + // Clients *MUST* ensure the Qualifiers listed correctly match the contents, + // and Servers *MAY* trust these values without validation. + // Fetch servers *MAY* require exact match of all qualifiers when returning + // content previously pushed, or allow fetching content with only a subset of + // the qualifiers specified on Push. + // + // Clients can specify expiration information that the server *SHOULD* + // respect. Subsequent requests can be used to alter the expiration time. + // + // A minimal compliant Fetch implementation may support only Push'd content + // and return `NOT_FOUND` for any resource that was not pushed first. + // Alternatively, a compliant implementation may choose to not support Push + // and only return resources that can be Fetch'd from origin. + // + // Errors will be returned as gRPC Status errors. + // The possible RPC errors include: + // * `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to + // perform the requested operation. The client may retry after a delay. + // * `UNAVAILABLE`: Due to a transient condition the operation could not be + // completed. The client should retry. + // * `INTERNAL`: An internal error occurred while performing the operation. + // The client should retry. + rpc PushBlob(PushBlobRequest) returns (PushBlobResponse) { + option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushBlob" body: "*" }; + } + + rpc PushDirectory(PushDirectoryRequest) returns (PushDirectoryResponse) { + option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushDirectory" body: "*" }; + } +} + +// A request message for +// [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob]. +message PushBlobRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). 
The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // The URI(s) of the content to associate. If multiple URIs are specified, the + // pushed content will be available to fetch by specifying any of them. + repeated string uris = 2; + + // Qualifiers sub-specifying the content that is being pushed - see comments + // on [Qualifier][build.bazel.remote.asset.v1.Qualifier]. + // The same qualifiers apply to all URIs. + repeated Qualifier qualifiers = 3; + + // A time after which this content should stop being returned via [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]. + // Servers *MAY* expire content early, e.g. due to storage pressure. + google.protobuf.Timestamp expire_at = 4; + + // The blob to associate. + build.bazel.remote.execution.v2.Digest blob_digest = 5; + + // Referenced blobs or directories that need to not expire before expiration + // of this association, in addition to `blob_digest` itself. + // These fields are hints - clients *MAY* omit them, and servers *SHOULD* + // respect them, at the risk of increased incidents of Fetch responses + // indirectly referencing unavailable blobs. + repeated build.bazel.remote.execution.v2.Digest references_blobs = 6; + repeated build.bazel.remote.execution.v2.Digest references_directories = 7; + + // The digest function that was used to compute the blob digest. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 8; +} + +// A response message for +// [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob]. 
+message PushBlobResponse { /* empty */ } + +// A request message for +// [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory]. +message PushDirectoryRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // The URI(s) of the content to associate. If multiple URIs are specified, the + // pushed content will be available to fetch by specifying any of them. + repeated string uris = 2; + + // Qualifiers sub-specifying the content that is being pushed - see comments + // on [Qualifier][build.bazel.remote.asset.v1.Qualifier]. + // The same qualifiers apply to all URIs. + repeated Qualifier qualifiers = 3; + + // A time after which this content should stop being returned via + // [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]. + // Servers *MAY* expire content early, e.g. due to storage pressure. + google.protobuf.Timestamp expire_at = 4; + + // Directory to associate + build.bazel.remote.execution.v2.Digest root_directory_digest = 5; + + // Referenced blobs or directories that need to not expire before expiration + // of this association, in addition to `root_directory_digest` itself. + // These fields are hints - clients *MAY* omit them, and servers *SHOULD* + // respect them, at the risk of increased incidents of Fetch responses + // indirectly referencing unavailable blobs. + repeated build.bazel.remote.execution.v2.Digest references_blobs = 6; + repeated build.bazel.remote.execution.v2.Digest references_directories = 7; + + // The digest function that was used to compute blob digests. 
+ // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 8; +} + +// A response message for +// [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory]. +message PushDirectoryResponse { /* empty */ } diff --git a/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto index fc0519822e7..40e3491765b 100644 --- a/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto +++ b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto @@ -104,7 +104,12 @@ service Execution { // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail // where, for each requested blob not present in the CAS, there is a // `Violation` with a `type` of `MISSING` and a `subject` of - // `"blobs/{hash}/{size}"` indicating the digest of the missing blob. + // `"blobs/{digest_function/}{hash}/{size}"` indicating the digest of the + // missing blob. The `subject` is formatted the same way as the + // `resource_name` provided to + // [ByteStream.Read][google.bytestream.ByteStream.Read], with the leading + // instance name omitted. `digest_function` MUST thus be omitted if its value + // is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, or VSO. // // The server does not need to guarantee that a call to this method leads to // at most one execution of the action. 
The server MAY execute the action
@@ -120,6 +125,14 @@ service Execution {
   // operation completes, and then respond with the completed operation. The
   // server MAY choose to stream additional updates as execution progresses,
   // such as to provide an update as to the state of the execution.
+  //
+  // In addition to the cases described for Execute, the WaitExecution method
+  // may fail as follows:
+  //
+  // * `NOT_FOUND`: The operation no longer exists due to any of a transient
+  //   condition, an unknown operation name, or if the server implements the
+  //   Operations API DeleteOperation method and it was called for the current
+  //   execution. The client should call `Execute` to retry.
   rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
     option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
   }
@@ -162,7 +175,7 @@ service ActionCache {
   //
   // In order to allow the server to perform access control based on the type of
   // action, and to assist with client debugging, the client MUST first upload
-  // the [Action][build.bazel.remote.execution.v2.Execution] that produced the
+  // the [Action][build.bazel.remote.execution.v2.Action] that produced the
   // result, along with its
   // [Command][build.bazel.remote.execution.v2.Command], into the
   // `ContentAddressableStorage`.
@@ -204,20 +217,27 @@ service ActionCache {
 // [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
 //
 // For uncompressed data, The `WriteRequest.resource_name` is of the following form:
-// `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+// `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`
 //
 // Where:
-// * `instance_name` is an identifier, possibly containing multiple path
-//   segments, used to distinguish between the various instances on the server,
-//   in a manner defined by the server.
If it is the empty path, the leading -// slash is omitted, so that the `resource_name` becomes -// `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`. +// * `instance_name` is an identifier used to distinguish between the various +// instances on the server. Syntax and semantics of this field are defined +// by the server; Clients must not make any assumptions about it (e.g., +// whether it spans multiple path segments or not). If it is the empty path, +// the leading slash is omitted, so that the `resource_name` becomes +// `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`. // To simplify parsing, a path segment cannot equal any of the following // keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`, // `capabilities` or `compressed-blobs`. // * `uuid` is a version 4 UUID generated by the client, used to avoid // collisions between concurrent uploads of the same data. Clients MAY // reuse the same `uuid` for uploading different blobs. +// * `digest_function` is a lowercase string form of a `DigestFunction.Value` +// enum, indicating which digest function was used to compute `hash`. If the +// digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, +// or VSO, this component MUST be omitted. In that case the server SHOULD +// infer the digest function using the length of the `hash` and the digest +// functions announced in the server's capabilities. // * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest] // of the data being uploaded. // * `optional_metadata` is implementation specific data, which clients MAY omit. 
@@ -225,10 +245,11 @@ service ActionCache { // // Data can alternatively be uploaded in compressed form, with the following // `WriteRequest.resource_name` form: -// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}` +// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}` // // Where: -// * `instance_name`, `uuid` and `optional_metadata` are defined as above. +// * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are +// defined as above. // * `compressor` is a lowercase string form of a `Compressor.Value` enum // other than `identity`, which is supported by the server and advertised in // [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor]. @@ -271,15 +292,17 @@ service ActionCache { // [Read method][google.bytestream.ByteStream.Read] of the ByteStream API. // // For uncompressed data, The `ReadRequest.resource_name` is of the following form: -// `{instance_name}/blobs/{hash}/{size}` -// Where `instance_name`, `hash` and `size` are defined as for uploads. +// `{instance_name}/blobs/{digest_function/}{hash}/{size}` +// Where `instance_name`, `digest_function`, `hash` and `size` are defined as +// for uploads. // // Data can alternatively be downloaded in compressed form, with the following // `ReadRequest.resource_name` form: -// `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}` +// `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}` // // Where: -// * `instance_name` and `compressor` are defined as for uploads. +// * `instance_name`, `compressor` and `digest_function` are defined as for +// uploads. 
// * `uncompressed_hash` and `uncompressed_size` refer to the // [Digest][build.bazel.remote.execution.v2.Digest] of the data being // downloaded, once uncompressed. Clients MUST verify that these match @@ -302,6 +325,15 @@ service ActionCache { // each of the compression formats that the server supports, as well as in // uncompressed form. // +// Additionally, ByteStream requests MAY come with an additional plain text header +// that indicates the `resource_name` of the blob being sent. The header, if +// present, MUST follow the following convention: +// * name: `build.bazel.remote.execution.v2.resource-name`. +// * contents: the plain text resource_name of the request message. +// If set, the contents of the header MUST match the `resource_name` of the request +// message. Servers MAY use this header to assist in routing requests to the +// appropriate backend. +// // The lifetime of entries in the CAS is implementation specific, but it SHOULD // be long enough to allow for newly-added and recently looked-up entries to be // used in subsequent calls (e.g. to @@ -407,6 +439,110 @@ service ContentAddressableStorage { rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) { option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" }; } + + // Split a blob into chunks. + // + // This call splits a blob into chunks, stores the chunks in the CAS, and + // returns a list of the chunk digests. Using this list, a client can check + // which chunks are locally available and just fetch the missing ones. The + // desired blob can be assembled by concatenating the fetched chunks in the + // order of the digests in the list. + // + // This rpc can be used to reduce the required data to download a large blob + // from CAS if chunks from earlier downloads of a different version of this + // blob are locally available. 
For this procedure to work properly, blobs + // SHOULD be split in a content-defined way, rather than with fixed-sized + // chunking. + // + // If a split request is answered successfully, a client can expect the + // following guarantees from the server: + // 1. The blob chunks are stored in CAS. + // 2. Concatenating the blob chunks in the order of the digest list returned + // by the server results in the original blob. + // + // Servers which implement this functionality MUST declare that they support + // it by setting the + // [CacheCapabilities.split_blob_support][build.bazel.remote.execution.v2.CacheCapabilities.split_blob_support] + // field accordingly. + // + // Clients MUST check that the server supports this capability, before using + // it. + // + // Clients SHOULD verify that the digest of the blob assembled by the fetched + // chunks is equal to the requested blob digest. + // + // The lifetimes of the generated chunk blobs MAY be independent of the + // lifetime of the original blob. In particular: + // * A blob and any chunk derived from it MAY be evicted from the CAS at + // different times. + // * A call to [SplitBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SplitBlob] + // extends the lifetime of the original blob, and sets the lifetimes of + // the resulting chunks (or extends the lifetimes of already-existing + // chunks). + // * Touching a chunk extends its lifetime, but the server MAY choose not + // to extend the lifetime of the original blob. + // * Touching the original blob extends its lifetime, but the server MAY + // choose not to extend the lifetimes of chunks derived from it. + // + // When blob splitting and splicing is used at the same time, the clients and + // the server SHOULD agree out-of-band upon a chunking algorithm used by both + // parties to benefit from each others chunk data and avoid unnecessary data + // duplication. 
+ // + // Errors: + // + // * `NOT_FOUND`: The requested blob is not present in the CAS. + // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob + // chunks. + rpc SplitBlob(SplitBlobRequest) returns (SplitBlobResponse) { + option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{blob_digest.hash}/{blob_digest.size_bytes}:splitBlob" }; + } + + // Splice a blob from chunks. + // + // This is the complementary operation to the + // [ContentAddressableStorage.SplitBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SplitBlob] + // function to handle the chunked upload of large blobs to save upload + // traffic. + // + // If a client needs to upload a large blob and is able to split a blob into + // chunks in such a way that reusable chunks are obtained, e.g., by means of + // content-defined chunking, it can first determine which parts of the blob + // are already available in the remote CAS and upload the missing chunks, and + // then use this API to instruct the server to splice the original blob from + // the remotely available blob chunks. + // + // Servers which implement this functionality MUST declare that they support + // it by setting the + // [CacheCapabilities.splice_blob_support][build.bazel.remote.execution.v2.CacheCapabilities.splice_blob_support] + // field accordingly. + // + // Clients MUST check that the server supports this capability, before using + // it. + // + // In order to ensure data consistency of the CAS, the server MUST only add + // blobs to the CAS after verifying their digests. In particular, servers MUST NOT + // trust digests provided by the client. The server MAY accept a request as no-op + // if the client-specified blob is already in CAS; the lifetime of that blob SHOULD + // be extended as usual. 
If the client-specified blob is not already in the CAS, + // the server SHOULD verify that the digest of the newly created blob matches the + // digest specified by the client, and reject the request if they differ. + // + // When blob splitting and splicing is used at the same time, the clients and + // the server SHOULD agree out-of-band upon a chunking algorithm used by both + // parties to benefit from each others chunk data and avoid unnecessary data + // duplication. + // + // Errors: + // + // * `NOT_FOUND`: At least one of the blob chunks is not present in the CAS. + // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the + // spliced blob. + // * `INVALID_ARGUMENT`: The digest of the spliced blob is different from the + // provided expected digest. + rpc SpliceBlob(SpliceBlobRequest) returns (SpliceBlobResponse) { + option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:spliceBlob" body: "*" }; + } } // The Capabilities service may be used by remote execution clients to query @@ -590,7 +726,7 @@ message Command { // to execution, even if they are not explicitly part of the input root. // // DEPRECATED since v2.1: Use `output_paths` instead. - repeated string output_files = 3; + repeated string output_files = 3 [ deprecated = true ]; // A list of the output directories that the client expects to retrieve from // the action. Only the listed directories will be returned (an entire @@ -621,7 +757,7 @@ message Command { // if they are not explicitly part of the input root. // // DEPRECATED since 2.1: Use `output_paths` instead. - repeated string output_directories = 4; + repeated string output_directories = 4 [ deprecated = true ]; // A list of the output paths that the client expects to retrieve from the // action. Only the listed paths will be returned to the client as output. @@ -661,7 +797,7 @@ message Command { // DEPRECATED as of v2.2: platform properties are now specified directly in // the action. 
See documentation note in the // [Action][build.bazel.remote.execution.v2.Action] for migration. - Platform platform = 5; + Platform platform = 5 [ deprecated = true ]; // The working directory, relative to the input root, for the command to run // in. It must be a directory which exists in the input tree. If it is left @@ -680,6 +816,33 @@ message Command { // property is not recognized by the server, the server will return an // `INVALID_ARGUMENT`. repeated string output_node_properties = 8; + + enum OutputDirectoryFormat { + // The client is only interested in receiving output directories in + // the form of a single Tree object, using the `tree_digest` field. + TREE_ONLY = 0; + + // The client is only interested in receiving output directories in + // the form of a hierarchy of separately stored Directory objects, + // using the `root_directory_digest` field. + DIRECTORY_ONLY = 1; + + // The client is interested in receiving output directories both in + // the form of a single Tree object and a hierarchy of separately + // stored Directory objects, using both the `tree_digest` and + // `root_directory_digest` fields. + TREE_AND_DIRECTORY = 2; + } + + // The format that the worker should use to store the contents of + // output directories. + // + // In case this field is set to a value that is not supported by the + // worker, the worker SHOULD interpret this field as TREE_ONLY. The + // worker MAY store output directories in formats that are a superset + // of what was requested (e.g., interpreting DIRECTORY_ONLY as + // TREE_AND_DIRECTORY). + OutputDirectoryFormat output_directory_format = 9; } // A `Platform` is a set of requirements, such as hardware, operating system, or @@ -931,8 +1094,8 @@ message SymlinkNode { // serializing, but care should be taken to avoid shortcuts. For instance, // concatenating two messages to merge them may produce duplicate fields. message Digest { - // The hash. 
In the case of SHA-256, it will always be a lowercase hex string - // exactly 64 characters long. + // The hash, represented as a lowercase hexadecimal string, padded with + // leading zeroes up to the hash function length. string hash = 1; // The size of the blob, in bytes. @@ -1037,7 +1200,7 @@ message ActionResult { // // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API // should still populate this field in addition to `output_symlinks`. - repeated OutputSymlink output_file_symlinks = 10; + repeated OutputSymlink output_file_symlinks = 10 [ deprecated = true ]; // New in v2.1: this field will only be populated if the command // `output_paths` field was used, and not the pre v2.1 `output_files` or @@ -1137,7 +1300,7 @@ message ActionResult { // // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API // should still populate this field in addition to `output_symlinks`. - repeated OutputSymlink output_directory_symlinks = 11; + repeated OutputSymlink output_directory_symlinks = 11 [ deprecated = true ]; // The exit code of the command. int32 exit_code = 4; @@ -1272,6 +1435,15 @@ message OutputDirectory { // compute their digests, constructing the Tree object manually avoids // redundant marshaling. bool is_topologically_sorted = 4; + + // The digest of the encoded + // [Directory][build.bazel.remote.execution.v2.Directory] proto + // containing the contents the directory's root. + // + // If both `tree_digest` and `root_directory_digest` are set, this + // field MUST match the digest of the root directory contained in the + // Tree message. + Digest root_directory_digest = 5; } // An `OutputSymlink` is similar to a @@ -1365,6 +1537,29 @@ message ExecuteRequest { // The server will have a default policy if this is not provided. // This may be applied to both the ActionResult and the associated blobs. ResultsCachePolicy results_cache_policy = 8; + + // The digest function that was used to compute the action digest. 
+ // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 9; + + // A hint to the server to request inlining stdout in the + // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message. + bool inline_stdout = 10; + + // A hint to the server to request inlining stderr in the + // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message. + bool inline_stderr = 11; + + // A hint to the server to inline the contents of the listed output files. + // Each path needs to exactly match one file path in either `output_paths` or + // `output_files` (DEPRECATED since v2.1) in the + // [Command][build.bazel.remote.execution.v2.Command] message. + repeated string inline_output_files = 12; } // A `LogFile` is a log stored in the CAS. @@ -1452,7 +1647,7 @@ message ExecutionStage { // Metadata about an ongoing // [execution][build.bazel.remote.execution.v2.Execution.Execute], which // will be contained in the [metadata -// field][google.longrunning.Operation.response] of the +// field][google.longrunning.Operation.metadata] of the // [Operation][google.longrunning.Operation]. message ExecuteOperationMetadata { // The current stage of execution. @@ -1471,6 +1666,19 @@ message ExecuteOperationMetadata { // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the // standard error from the endpoint hosting streamed responses. string stderr_stream_name = 4; + + // The client can read this field to view details about the ongoing + // execution. + ExecutedActionMetadata partial_execution_metadata = 5; + + // The digest function that was used to compute the action digest. 
+ // + // If the digest function used is one of BLAKE3, MD5, MURMUR3, SHA1, + // SHA256, SHA256TREE, SHA384, SHA512, or VSO, the server MAY leave + // this field unset. In that case the client SHOULD infer the digest + // function using the length of the action digest hash and the digest + // functions announced in the server's capabilities. + DigestFunction.Value digest_function = 6; } // A request message for @@ -1508,6 +1716,15 @@ message GetActionResultRequest { // `output_files` (DEPRECATED since v2.1) in the // [Command][build.bazel.remote.execution.v2.Command] message. repeated string inline_output_files = 5; + + // The digest function that was used to compute the action digest. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 6; } // A request message for @@ -1532,6 +1749,15 @@ message UpdateActionResultRequest { // The server will have a default policy if this is not provided. // This may be applied to both the ActionResult and the associated blobs. ResultsCachePolicy results_cache_policy = 4; + + // The digest function that was used to compute the action digest. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 5; } // A request message for @@ -1544,8 +1770,18 @@ message FindMissingBlobsRequest { // omitted. string instance_name = 1; - // A list of the blobs to check. + // A list of the blobs to check. 
All digests MUST use the same digest + // function. repeated Digest blob_digests = 2; + + // The digest function of the blobs whose existence is checked. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the blob digest hashes and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 3; } // A response message for @@ -1560,14 +1796,15 @@ message FindMissingBlobsResponse { message BatchUpdateBlobsRequest { // A request corresponding to a single blob that the client wants to upload. message Request { - // The digest of the blob. This MUST be the digest of `data`. + // The digest of the blob. This MUST be the digest of `data`. All + // digests MUST use the same digest function. Digest digest = 1; // The raw binary data. bytes data = 2; // The format of `data`. Must be `IDENTITY`/unspecified, or one of the - // compressors advertised by the + // compressors advertised by the // [CacheCapabilities.supported_batch_compressors][build.bazel.remote.execution.v2.CacheCapabilities.supported_batch_compressors] // field. Compressor.Value compressor = 3; @@ -1582,6 +1819,16 @@ message BatchUpdateBlobsRequest { // The individual upload requests. repeated Request requests = 2; + + // The digest function that was used to compute the digests of the + // blobs being uploaded. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the blob digest hashes and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 5; } // A response message for @@ -1610,12 +1857,22 @@ message BatchReadBlobsRequest { // omitted. 
string instance_name = 1; - // The individual blob digests. + // The individual blob digests. All digests MUST use the same digest + // function. repeated Digest digests = 2; // A list of acceptable encodings for the returned inlined data, in no // particular order. `IDENTITY` is always allowed even if not specified here. repeated Compressor.Value acceptable_compressors = 3; + + // The digest function of the blobs being requested. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the blob digest hashes and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 4; } // A response message for @@ -1668,6 +1925,16 @@ message GetTreeRequest { // If present, the server will use that token as an offset, returning only // that page and the ones that succeed it. string page_token = 4; + + // The digest function that was used to compute the digest of the root + // directory. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the root digest hash and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 5; } // A response message for @@ -1683,6 +1950,86 @@ message GetTreeResponse { string next_page_token = 2; } +// A request message for +// [ContentAddressableStorage.SplitBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SplitBlob]. +message SplitBlobRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). 
The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // The digest of the blob to be split. + Digest blob_digest = 2; + + // The digest function of the blob to be split. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the blob digest hashes and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 3; +} + +// A response message for +// [ContentAddressableStorage.SplitBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SplitBlob]. +message SplitBlobResponse { + // The ordered list of digests of the chunks into which the blob was split. + // The original blob is assembled by concatenating the chunk data according to + // the order of the digests given by this list. + // + // The server MUST use the same digest function as the one explicitly or + // implicitly (through hash length) specified in the split request. + repeated Digest chunk_digests = 1; +} + +// A request message for +// [ContentAddressableStorage.SpliceBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SpliceBlob]. +message SpliceBlobRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // Expected digest of the spliced blob. The client SHOULD set this field due + // to the following reasons: + // 1. 
It allows the server to perform an early existence check of the blob + // before spending the splicing effort, as described in the + // [ContentAddressableStorage.SpliceBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SpliceBlob] + // documentation. + // 2. It allows servers with different storage backends to dispatch the + // request to the correct storage backend based on the size and/or the + // hash of the blob. + Digest blob_digest = 2; + + // The ordered list of digests of the chunks which need to be concatenated to + // assemble the original blob. + repeated Digest chunk_digests = 3; + + // The digest function of all chunks to be concatenated and of the blob to be + // spliced. The server MUST use the same digest function for both cases. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, + // SHA512, or VSO, the client MAY leave this field unset. In that case the + // server SHOULD infer the digest function using the length of the blob digest + // hashes and the digest functions announced in the server's capabilities. + DigestFunction.Value digest_function = 4; +} + +// A response message for +// [ContentAddressableStorage.SpliceBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SpliceBlob]. +message SpliceBlobResponse { + // Computed digest of the spliced blob. + // + // The server MUST use the same digest function as the one explicitly or + // implicitly (through hash length) specified in the splice request. + Digest blob_digest = 1; +} + // A request message for // [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. message GetCapabilitiesRequest { @@ -1743,6 +2090,66 @@ message DigestFunction { // cryptographic hash function and its collision properties are not strongly guaranteed. // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 . MURMUR3 = 7; + + // The SHA-256 digest function, modified to use a Merkle tree for + // large objects. 
This permits implementations to store large blobs + // as a decomposed sequence of 2^j sized chunks, where j >= 10, + // while being able to validate integrity at the chunk level. + // + // Furthermore, on systems that do not offer dedicated instructions + // for computing SHA-256 hashes (e.g., the Intel SHA and ARMv8 + // cryptographic extensions), SHA256TREE hashes can be computed more + // efficiently than plain SHA-256 hashes by using generic SIMD + // extensions, such as Intel AVX2 or ARM NEON. + // + // SHA256TREE hashes are computed as follows: + // + // - For blobs that are 1024 bytes or smaller, the hash is computed + // using the regular SHA-256 digest function. + // + // - For blobs that are more than 1024 bytes in size, the hash is + // computed as follows: + // + // 1. The blob is partitioned into a left (leading) and right + // (trailing) blob. These blobs have lengths m and n + // respectively, where m = 2^k and 0 < n <= m. + // + // 2. Hashes of the left and right blob, Hash(left) and + // Hash(right) respectively, are computed by recursively + // applying the SHA256TREE algorithm. + // + // 3. A single invocation is made to the SHA-256 block cipher with + // the following parameters: + // + // M = Hash(left) || Hash(right) + // H = { + // 0xcbbb9d5d, 0x629a292a, 0x9159015a, 0x152fecd8, + // 0x67332667, 0x8eb44a87, 0xdb0c2e0d, 0x47b5481d, + // } + // + // The values of H are the leading fractional parts of the + // square roots of the 9th to the 16th prime number (23 to 53). + // This differs from plain SHA-256, where the first eight prime + // numbers (2 to 19) are used, thereby preventing trivial hash + // collisions between small and large objects. + // + // 4. 
The hash of the full blob can then be obtained by + // concatenating the outputs of the block cipher: + // + // Hash(blob) = a || b || c || d || e || f || g || h + // + // Addition of the original values of H, as normally done + // through the use of the Davies-Meyer structure, is not + // performed. This isn't necessary, as the block cipher is only + // invoked once. + // + // Test vectors of this digest function can be found in the + // accompanying sha256tree_test_vectors.txt file. + SHA256TREE = 8; + + // The BLAKE3 hash function. + // See https://github.com/BLAKE3-team/BLAKE3. + BLAKE3 = 9; } } @@ -1753,7 +2160,7 @@ message ActionCacheUpdateCapabilities { // Allowed values for priority in // [ResultsCachePolicy][build.bazel.remoteexecution.v2.ResultsCachePolicy] and -// [ExecutionPolicy][build.bazel.remoteexecution.v2.ResultsCachePolicy] +// [ExecutionPolicy][build.bazel.remoteexecution.v2.ExecutionPolicy] // Used for querying both cache and execution valid priority ranges. message PriorityCapabilities { // Supported range of priorities, including boundaries. @@ -1803,6 +2210,9 @@ message Compressor { // It is advised to use algorithms such as Zstandard instead, as // those are faster and/or provide a better compression ratio. DEFLATE = 2; + + // Brotli compression. + BROTLI = 3; } } @@ -1839,11 +2249,39 @@ message CacheCapabilities { // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] // requests. repeated Compressor.Value supported_batch_update_compressors = 7; + + // The maximum blob size that the server will accept for CAS blob uploads. + // - If it is 0, it means there is no limit set. A client may assume + // arbitrarily large blobs may be uploaded to and downloaded from the cache. + // - If it is larger than 0, implementations SHOULD NOT attempt to upload + // blobs with size larger than the limit. 
Servers SHOULD reject blob + // uploads over the `max_cas_blob_size_bytes` limit with response code + // `INVALID_ARGUMENT` + // - If the cache implementation returns a given limit, it MAY still serve + // blobs larger than this limit. + int64 max_cas_blob_size_bytes = 8; + + // Whether blob splitting is supported for the particular server/instance. If + // yes, the server/instance implements the specified behavior for blob + // splitting and a meaningful result can be expected from the + // [ContentAddressableStorage.SplitBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SplitBlob] + // operation. + bool split_blob_support = 9; + + // Whether blob splicing is supported for the particular server/instance. If + // yes, the server/instance implements the specified behavior for blob + // splicing and a meaningful result can be expected from the + // [ContentAddressableStorage.SpliceBlob][build.bazel.remote.execution.v2.ContentAddressableStorage.SpliceBlob] + // operation. + bool splice_blob_support = 10; } // Capabilities of the remote execution system. message ExecutionCapabilities { - // Remote execution may only support a single digest function. + // Legacy field for indicating which digest function is supported by the + // remote execution system. It MUST be set to a value other than UNKNOWN. + // Implementations should consider the repeated digest_functions field + // first, falling back to this singular field if digest_functions is unset. DigestFunction.Value digest_function = 1; // Whether remote execution is enabled for the particular server/instance. @@ -1854,6 +2292,20 @@ message ExecutionCapabilities { // Supported node properties. repeated string supported_node_properties = 4; + + // All the digest functions supported by the remote execution system. + // If this field is set, it MUST also contain digest_function. 
+ // + // Even if the remote execution system announces support for multiple + // digest functions, individual execution requests may only reference + // CAS objects using a single digest function. For example, it is not + // permitted to execute actions having both MD5 and SHA-256 hashed + // files in their input root. + // + // The CAS objects referenced by action results generated by the + // remote execution system MUST use the same digest function as the + // one used to construct the action. + repeated DigestFunction.Value digest_functions = 5; } // Details for the tool used to call the API. @@ -1872,7 +2324,7 @@ message ToolDetails { // // * name: `build.bazel.remote.execution.v2.requestmetadata-bin` // * contents: the base64 encoded binary `RequestMetadata` message. -// Note: the gRPC library serializes binary headers encoded in base 64 by +// Note: the gRPC library serializes binary headers encoded in base64 by // default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests). // Therefore, if the gRPC library is used to pass/retrieve this // metadata, the user may ignore the base64 encoding and assume it is simply diff --git a/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/logstream/v1/remote_logstream.proto b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/logstream/v1/remote_logstream.proto new file mode 100644 index 00000000000..2741016b926 --- /dev/null +++ b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/remote/logstream/v1/remote_logstream.proto @@ -0,0 +1,139 @@ +// Copyright 2020 The Bazel Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Log Stream API + +syntax = "proto3"; + +package build.bazel.remote.logstream.v1; + +option csharp_namespace = "Build.Bazel.Remote.LogStream.v1"; +option go_package = "github.com/bazelbuild/remote-apis/build/bazel/remote/logstream/v1;remotelogstream"; +option java_multiple_files = true; +option java_outer_classname = "RemoteLogStreamProto"; +option java_package = "build.bazel.remote.logstream.v1"; +option objc_class_prefix = "RL"; + + +// #### Introduction +// +// The Log Stream API manages LogStream resources which are used to stream +// writes and reads of an ordered sequence of bytes of unknown eventual length. +// +// Note that this is an API Interface and not an API Service, per the definitions +// at: https://cloud.google.com/apis/design/glossary +// +// Log Stream API supports the reading of unfinalized LogStreams either by +// seeking or in "tail" mode, for example by end-users browsing to a build +// result UI interested in seeing logs from a build action as soon as they are +// (or as they become) available. +// +// Reads and Writes of LogStreams are done via the Byte Stream API: +// https://cloud.google.com/dataproc/docs/reference/rpc/google.bytestream +// https://github.com/googleapis/googleapis/blob/master/google/bytestream/bytestream.proto +// +// #### Writing LogStreams +// +// LogStreams are written to via the Byte Stream API's `Write` RPC. Bytes +// written to LogStreams are expected to be committed and available for reading +// within a reasonable period of time (implementation-defined). 
Committed bytes +// to a LogStream cannot be overwritten, and finalized LogStreams - indicated by +// setting `finish_write` field in the final WriteRequest - also cannot be +// appended to. +// +// When calling the Byte Stream API's `Write` RPC to write LogStreams, writers +// must pass the `write_resource_name` of a LogStream as +// `ByteStream.WriteRequest.resource_name` rather than the LogStream's `name`. +// Separate resource names for reading and writing allows for broadcasting the +// read resource name widely while simultaneously ensuring that only writer(s) +// with knowledge of the write resource name may have written bytes to the +// LogStream. +// +// #### Reading LogStreams +// +// Use the Byte Stream API's `Read` RPC to read LogStreams. When reading +// finalized LogStreams the server will stream all contents of the LogStream +// starting at `ByteStream.ReadRequest.read_offset`. +// +// When reading unfinalized LogStreams the server must keep the streaming +// `ByteStream.Read` RPC open and send `ByteStream.ReadResponse` messages as +// more bytes become available or the LogStream is finalized. +// +// #### Example Multi-Party Read/Write Flow +// +// 1. LogStream Writer calls `CreateLogStream` +// 2. LogStream Writer publishes `LogStream.name` +// 3. LogStream Writer calls `ByteStream.Write` with +// `LogStream.write_resource_name` as +// `ByteStream.WriteRequest.resource_name`, +// `ByteStream.WriteRequest.finish_write`=false. +// 4. LogStream Reader(s) call `ByteStream.Read` with the published +// `LogStream.name` as `ByteStream.ReadRequest.resource_name`. +// 5. LogStream Service streams all committed bytes to LogStream Reader(s), +// leave the stream open. +// 6. LogStream Writer calls `ByteStream.Write` with +// `LogStream.write_resource_name` as +// `ByteStream.WriteRequest.resource_name`, +// `ByteStream.WriteRequest.finish_write`=true. +// 7. LogStream Service streams all remaining bytes to LogStream Reader(s), +// terminates the stream. 
+service LogStreamService { + // Create a LogStream which may be written to. + // + // The returned LogStream resource name will include a `write_resource_name` + // which is the resource to use when writing to the LogStream. + // Callers of CreateLogStream are expected to NOT publish the + // `write_resource_name`. + rpc CreateLogStream(CreateLogStreamRequest) returns (LogStream) {} +} + +// Contains all information necessary to create a new LogStream resource. +message CreateLogStreamRequest { + // Required. The parent resource of the created LogStream. + // The list of valid types of parent resources of LogStreams is up to the + // implementing server. + // Example: projects/123 + string parent = 1; +} + +// A handle to a log (an ordered sequence of bytes). +message LogStream { + // Structured name of the resource in the format: + // {parent=**}/logstreams/{logstream_id} + // Example: projects/123/logstreams/456-def + // Attempting to call the Byte Stream API's `Write` RPC with a LogStream's + // `name` as the value for `ByteStream.Write.resource_name` is an error. + string name = 1; + + // Resource name to pass to `ByteStream.Write` in the format: + // {parent=**}/logstreams/{logstream_id}/{write_token} + // Example: projects/123/logstreams/456-def/789-ghi + // Attempting to call the Byte Stream API's `Read` RPC with a LogStream's + // `write_resource_name` as the value for `ByteStream.Write.resource_name` + // is an error. + // + // `write_resource_name` is separate from `name` to ensure that only the + // intended writers can write to a given LogStream. Writers must address write + // operations to the `write_resource_name`, not the `name`, and must have + // permission to write LogStreams. `write_resource_name` embeds a secret token + // and should be protected accordingly; a mishandled `write_resource_name` can + // result in unintended writers corrupting the LogStream. 
Therefore, the field + // should be excluded from calls to any calls which retrieve LogStream + // metadata (i.e.: `GetLogStream`). + // + // Bytes written to this resource must to be readable when `ByteStream.Read` + // is called with the `name` resource. + // Reading a write_resource_name must return an INVALID_ARGUMENT error. + string write_resource_name = 2; +} diff --git a/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto index 3b626b7e47c..44f83f85764 100644 --- a/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto +++ b/src/rust/protos/protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto @@ -17,7 +17,7 @@ syntax = "proto3"; package build.bazel.semver; option csharp_namespace = "Build.Bazel.Semver"; -option go_package = "semver"; +option go_package = "github.com/bazelbuild/remote-apis/build/bazel/semver"; option java_multiple_files = true; option java_outer_classname = "SemverProto"; option java_package = "build.bazel.semver"; diff --git a/src/rust/protos/protos/googleapis/README.md b/src/rust/protos/protos/googleapis/README.md index b25d70f50a0..b3aab8bd103 100644 --- a/src/rust/protos/protos/googleapis/README.md +++ b/src/rust/protos/protos/googleapis/README.md @@ -1,5 +1,5 @@ This is a dump of the .proto files from https://github.com/googleapis/googleapis directory google. -This dump was taken at git sha e17dbfb19652240490cae8adeb89991d13cf9df7. +This dump was taken at git sha bc7e3baa28fbb223fa93782e130260fab8205bfc. It is a selective view of only the protos we actually need. 
diff --git a/src/rust/protos/protos/googleapis/google/api/annotations.proto b/src/rust/protos/protos/googleapis/google/api/annotations.proto index 85c361b47fe..417edd8fa19 100644 --- a/src/rust/protos/protos/googleapis/google/api/annotations.proto +++ b/src/rust/protos/protos/googleapis/google/api/annotations.proto @@ -1,4 +1,4 @@ -// Copyright (c) 2015, Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/rust/protos/protos/googleapis/google/api/client.proto b/src/rust/protos/protos/googleapis/google/api/client.proto new file mode 100644 index 00000000000..3d692560abb --- /dev/null +++ b/src/rust/protos/protos/googleapis/google/api/client.proto @@ -0,0 +1,486 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/launch_stage.proto"; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. 
+ // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... 
+ // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + string oauth_scopes = 1050; + + // The API version of this service, which should be sent by version-aware + // clients to the service. This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. + // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + string api_version = 525000001; +} + +// Required information for every language. +message CommonLanguageSettings { + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + string reference_docs_uri = 1 [deprecated = true]; + + // The destination where API teams want this client library to be published. + repeated ClientLibraryDestination destinations = 2; + + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration selective_gapic_generation = 3; +} + +// Details about how and where to publish client libraries. +message ClientLibrarySettings { + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + string version = 1; + + // Launch stage of this version of the API. 
+ LaunchStage launch_stage = 2; + + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + bool rest_numeric_enums = 3; + + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings java_settings = 21; + + // Settings for C++ client libraries. + CppSettings cpp_settings = 22; + + // Settings for PHP client libraries. + PhpSettings php_settings = 23; + + // Settings for Python client libraries. + PythonSettings python_settings = 24; + + // Settings for Node client libraries. + NodeSettings node_settings = 25; + + // Settings for .NET client libraries. + DotnetSettings dotnet_settings = 26; + + // Settings for Ruby client libraries. + RubySettings ruby_settings = 27; + + // Settings for Go client libraries. + GoSettings go_settings = 28; +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +message Publishing { + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + repeated MethodSettings method_settings = 2; + + // Link to a *public* URI where users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + string new_issue_uri = 101; + + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + string documentation_uri = 102; + + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + string api_short_name = 103; + + // GitHub label to apply to issues and pull requests opened for this API. + string github_label = 104; + + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. 
+ repeated string codeowner_github_teams = 105; + + // A prefix used in sample code when demarking regions to be included in + // documentation. + string doc_tag_prefix = 106; + + // For whom the client library is being published. + ClientLibraryOrganization organization = 107; + + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + repeated ClientLibrarySettings library_settings = 109; + + // Optional link to proto reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rpc + string proto_reference_documentation_uri = 110; + + // Optional link to REST reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rest + string rest_reference_documentation_uri = 111; +} + +// Settings for Java client libraries. +message JavaSettings { + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + string library_package = 1; + + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. 
+  //
+  // Example of a YAML configuration::
+  //
+  //    publishing:
+  //      java_settings:
+  //        service_class_names:
+  //          - google.pubsub.v1.Publisher: TopicAdmin
+  //          - google.pubsub.v1.Subscriber: SubscriptionAdmin
+  map<string, string> service_class_names = 2;
+
+  // Some settings.
+  CommonLanguageSettings common = 3;
+}
+
+// Settings for C++ client libraries.
+message CppSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Php client libraries.
+message PhpSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Python client libraries.
+message PythonSettings {
+  // Experimental features to be included during client library generation.
+  // These fields will be deprecated once the feature graduates and is enabled
+  // by default.
+  message ExperimentalFeatures {
+    // Enables generation of asynchronous REST clients if `rest` transport is
+    // enabled. By default, asynchronous REST clients will not be generated.
+    // This feature will be enabled by default 1 month after launching the
+    // feature in preview packages.
+    bool rest_async_io_enabled = 1;
+
+    // Enables generation of protobuf code using new types that are more
+    // Pythonic which are included in `protobuf>=5.29.x`. This feature will be
+    // enabled by default 1 month after launching the feature in preview
+    // packages.
+    bool protobuf_pythonic_types_enabled = 2;
+
+    // Disables generation of an unversioned Python package for this client
+    // library. This means that the module names will need to be versioned in
+    // import statements. For example `import google.cloud.library_v2` instead
+    // of `import google.cloud.library`.
+    bool unversioned_package_disabled = 3;
+  }
+
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Experimental features to be included during client library generation.
+  ExperimentalFeatures experimental_features = 2;
+}
+
+// Settings for Node client libraries.
+message NodeSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Dotnet client libraries.
+message DotnetSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Map from original service names to renamed versions.
+  // This is used when the default generated types
+  // would cause a naming conflict. (Neither name is
+  // fully-qualified.)
+  // Example: Subscriber to SubscriberServiceApi.
+  map<string, string> renamed_services = 2;
+
+  // Map from full resource types to the effective short name
+  // for the resource. This is used when otherwise resource
+  // named from different services would cause naming collisions.
+  // Example entry:
+  // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
+  map<string, string> renamed_resources = 3;
+
+  // List of full resource types to ignore during generation.
+  // This is typically used for API-specific Location resources,
+  // which should be handled by the generator as if they were actually
+  // the common Location resources.
+  // Example entry: "documentai.googleapis.com/Location"
+  repeated string ignored_resources = 4;
+
+  // Namespaces which must be aliased in snippets due to
+  // a known (but non-generator-predictable) naming collision
+  repeated string forced_namespace_aliases = 5;
+
+  // Method signatures (in the form "service.method(signature)")
+  // which are provided separately, so shouldn't be generated.
+  // Snippets *calling* these methods are still generated, however.
+  repeated string handwritten_signatures = 6;
+}
+
+// Settings for Ruby client libraries.
+message RubySettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Go client libraries.
+message GoSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+
+  // Map of service names to renamed services. Keys are the package relative
+  // service names and values are the name to be used for the service client
+  // and call options.
+  //
+  //  publishing:
+  //    go_settings:
+  //      renamed_services:
+  //        Publisher: TopicAdmin
+  map<string, string> renamed_services = 2;
+}
+
+// Describes the generator configuration for a method.
+message MethodSettings {
+  // Describes settings to use when generating API methods that use the
+  // long-running operation pattern.
+  // All default values below are from those used in the client library
+  // generators (e.g.
+  // [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
+  message LongRunning {
+    // Initial delay after which the first poll request will be made.
+    // Default value: 5 seconds.
+    google.protobuf.Duration initial_poll_delay = 1;
+
+    // Multiplier to gradually increase delay between subsequent polls until it
+    // reaches max_poll_delay.
+    // Default value: 1.5.
+    float poll_delay_multiplier = 2;
+
+    // Maximum time between two subsequent poll requests.
+    // Default value: 45 seconds.
+    google.protobuf.Duration max_poll_delay = 3;
+
+    // Total polling timeout.
+    // Default value: 5 minutes.
+    google.protobuf.Duration total_poll_timeout = 4;
+  }
+
+  // The fully qualified name of the method, for which the options below apply.
+  // This is used to find the method to apply the options.
+  //
+  // Example:
+  //
+  //    publishing:
+  //      method_settings:
+  //      - selector: google.storage.control.v2.StorageControl.CreateFolder
+  //        # method settings for CreateFolder...
+  string selector = 1;
+
+  // Describes settings to use for long-running operations when generating
+  // API methods for RPCs. Complements RPCs that use the annotations in
+  // google/longrunning/operations.proto.
+ // + // Example of a YAML configuration:: + // + // publishing: + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes + LongRunning long_running = 2; + + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + repeated string auto_populated_fields = 3; +} + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +enum ClientLibraryOrganization { + // Not useful. + CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED = 0; + + // Google Cloud Platform Org. + CLOUD = 1; + + // Ads (Advertising) Org. + ADS = 2; + + // Photos Org. + PHOTOS = 3; + + // Street View Org. + STREET_VIEW = 4; + + // Shopping Org. + SHOPPING = 5; + + // Geo Org. + GEO = 6; + + // Generative AI - https://developers.generativeai.google + GENERATIVE_AI = 7; +} + +// To where should client libraries be published? +enum ClientLibraryDestination { + // Client libraries will neither be generated nor published to package + // managers. + CLIENT_LIBRARY_DESTINATION_UNSPECIFIED = 0; + + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + GITHUB = 10; + + // Publish the library to package managers like nuget.org and npmjs.com. + PACKAGE_MANAGER = 20; +} + +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. 
+message SelectiveGapicGeneration { + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + repeated string methods = 1; + + // Setting this to true indicates to the client generators that methods + // that would be excluded from the generation should instead be generated + // in a way that indicates these methods should not be consumed by + // end users. How this is expressed is up to individual language + // implementations to decide. Some examples may be: added annotations, + // obfuscated identifiers, or other language idiomatic patterns. + bool generate_omitted_as_internal = 2; +} diff --git a/src/rust/protos/protos/googleapis/google/api/field_behavior.proto b/src/rust/protos/protos/googleapis/google/api/field_behavior.proto new file mode 100644 index 00000000000..1fdaaed11ac --- /dev/null +++ b/src/rust/protos/protos/googleapis/google/api/field_behavior.proto @@ -0,0 +1,104 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052 [packed = false]; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. 
+ // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; + + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + NON_EMPTY_DEFAULT = 7; + + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. 
+ IDENTIFIER = 8; +} diff --git a/src/rust/protos/protos/googleapis/google/api/http.proto b/src/rust/protos/protos/googleapis/google/api/http.proto index 5f8538a0164..57621b53743 100644 --- a/src/rust/protos/protos/googleapis/google/api/http.proto +++ b/src/rust/protos/protos/googleapis/google/api/http.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,15 +16,13 @@ syntax = "proto3"; package google.api; -option cc_enable_arenas = true; option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "HttpProto"; option java_package = "com.google.api"; option objc_class_prefix = "GAPI"; - -// Defines the HTTP configuration for a service. It contains a list of +// Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. message Http { @@ -32,91 +30,101 @@ message Http { // // **NOTE:** All service configuration rules follow "last one wins" order. repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; } -// `HttpRule` defines the mapping of an RPC method to one or more HTTP -// REST APIs. The mapping determines what portions of the request -// message are populated from the path, query parameters, or body of -// the HTTP request. The mapping is typically specified as an -// `google.api.http` annotation, see "google/api/annotations.proto" -// for details. 
-// -// The mapping consists of a field specifying the path template and -// method kind. The path template can refer to fields in the request -// message, as in the example below which describes a REST GET -// operation on a resource collection of messages: -// +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. 
+// +// Example: // // service Messaging { // rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; // } // } // message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// SubMessage sub = 2; // `sub.subfield` is url-mapped +// string name = 1; // Mapped to URL path. // } // message Message { -// string text = 1; // content of the resource +// string text = 1; // The resource content. // } // -// The same http annotation can alternatively be expressed inside the -// `GRPC API Configuration` YAML file. +// This enables an HTTP REST to gRPC mapping as below: // -// http: -// rules: -// - selector: .Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// This definition enables an automatic, bidrectional mapping of HTTP -// JSON to RPC. Example: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` -// -// In general, not only fields but also field paths can be referenced -// from a path pattern. Fields mapped to the path pattern cannot be -// repeated and must have a primitive (non-message) type. -// -// Any fields in the request message which are not bound by the path -// pattern automatically become (optional) HTTP query -// parameters. Assume the following definition of the request message: +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. 
+// For example:
 //
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           get:"/v1/messages/{message_id}"
+//         };
+//       }
+//     }
 //     message GetMessageRequest {
 //       message SubMessage {
 //         string subfield = 1;
 //       }
-//       string message_id = 1; // mapped to the URL
-//       int64 revision = 2;    // becomes a parameter
-//       SubMessage sub = 3;    // `sub.subfield` becomes a parameter
+//       string message_id = 1; // Mapped to URL path.
+//       int64 revision = 2;    // Mapped to URL query parameter `revision`.
+//       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
 //     }
 //
-//
 // This enables a HTTP JSON to RPC mapping as below:
 //
-// HTTP | RPC
-// -----|-----
-// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+// SubMessage(subfield: "foo"))`
 //
-// Note that fields which are mapped to HTTP parameters must have a
-// primitive type or a repeated primitive type. Message types are not
-// allowed. In the case of a repeated type, the parameter can be
-// repeated in the URL, as in `...?param=A&param=B`.
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type or a repeated primitive type or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
 //
-// For HTTP method kinds which allow a request body, the `body` field
+// For HTTP methods that allow a request body, the `body` field
 // specifies the mapping.
Consider a REST update method on the // message resource collection: // -// // service Messaging { // rpc UpdateMessage(UpdateMessageRequest) returns (Message) { // option (google.api.http) = { -// put: "/v1/messages/{message_id}" +// patch: "/v1/messages/{message_id}" // body: "message" // }; // } @@ -126,14 +134,12 @@ message Http { // Message message = 2; // mapped to the body // } // -// // The following HTTP JSON to RPC mapping is enabled, where the // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -143,7 +149,7 @@ message Http { // service Messaging { // rpc UpdateMessage(Message) returns (Message) { // option (google.api.http) = { -// put: "/v1/messages/{message_id}" +// patch: "/v1/messages/{message_id}" // body: "*" // }; // } @@ -156,13 +162,12 @@ message Http { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice of +// the body. This makes this option more rarely used in practice when // defining REST APIs. The common usage of `*` is in custom methods // which don't use the URL at all for transferring data. 
// @@ -184,32 +189,34 @@ message Http { // string user_id = 2; // } // +// This enables the following two alternative HTTP JSON to RPC mappings: // -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// # Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` // -// The rules for mapping HTTP path, query parameters, and body fields -// to the request message are as follows: +// Rules for HTTP mapping // -// 1. The `body` field specifies either `*` or a field path, or is -// omitted. If omitted, it assumes there is no HTTP body. -// 2. Leaf fields (recursive expansion of nested messages in the -// request) can be classified into three types: -// (a) Matched in the URL template. -// (b) Covered by body (if body is `*`, everything except (a) fields; -// else everything under the body field) -// (c) All other fields. -// 3. URL query parameters found in the HTTP request are mapped to (c) fields. -// 4. Any body sent with an HTTP request can contain only (b) fields. +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. 
If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. // -// The syntax of the path template is as follows: +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -218,63 +225,135 @@ message Http { // FieldPath = IDENT { "." IDENT } ; // Verb = ":" LITERAL ; // -// The syntax `*` matches a single path segment. It follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion. -// -// The syntax `**` matches zero or more path segments. It follows the semantics -// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved -// Expansion. NOTE: it must be the last segment in the path except the Verb. -// -// The syntax `LITERAL` matches literal text in the URL path. +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. // -// The syntax `Variable` matches the entire path as specified by its template; -// this nested template must not contain further variables. If a variable +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable // matches a single path segment, its template may be omitted, e.g. `{var}` // is equivalent to `{var=*}`. // -// NOTE: the field paths in variables and in the `body` must not refer to -// repeated fields or map fields. +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. 
+// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// The following example selects a gRPC method and applies an `HttpRule` to it: // -// Use CustomHttpPattern to specify any HTTP method that is not included in the -// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for -// a given URL path rule. 
The wild-card rule is useful for services that provide -// content to Web (HTML) clients. +// http: +// rules: +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. message HttpRule { - // Selects methods to which this rule applies. + // Selects a method to which this rule applies. // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. 
string selector = 1; // Determines the URL pattern is matched by this rules. This pattern can be // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. oneof pattern { - // Used for listing and getting information about resources. + // Maps to HTTP GET. Used for listing and getting information about + // resources. string get = 2; - // Used for updating a resource. + // Maps to HTTP PUT. Used for replacing a resource. string put = 3; - // Used for creating a resource. + // Maps to HTTP POST. Used for creating a resource or performing an action. string post = 4; - // Used for deleting a resource. + // Maps to HTTP DELETE. Used for deleting a resource. string delete = 5; - // Used for updating a resource. + // Maps to HTTP PATCH. Used for updating a resource. string patch = 6; - // Custom pattern is used for defining custom verbs. + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. CustomHttpPattern custom = 8; } - // The name of the request field whose value is mapped to the HTTP body, or - // `*` for mapping all fields not captured by the path pattern to the HTTP - // body. NOTE: the referred field must not be a repeated field and must be - // present at the top-level of request message type. + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. string body = 7; + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. 
When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + // Additional HTTP bindings for the selector. Nested bindings must // not contain an `additional_bindings` field themselves (that is, // the nesting may only be one level deep). diff --git a/src/rust/protos/protos/googleapis/google/api/launch_stage.proto b/src/rust/protos/protos/googleapis/google/api/launch_stage.proto new file mode 100644 index 00000000000..1e86c1ad178 --- /dev/null +++ b/src/rust/protos/protos/googleapis/google/api/launch_stage.proto @@ -0,0 +1,72 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option go_package = "google.golang.org/genproto/googleapis/api;api"; +option java_multiple_files = true; +option java_outer_classname = "LaunchStageProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +enum LaunchStage { + // Do not use this default value. + LAUNCH_STAGE_UNSPECIFIED = 0; + + // The feature is not yet implemented. Users can not use it. + UNIMPLEMENTED = 6; + + // Prelaunch features are hidden from users and are only visible internally. 
+ PRELAUNCH = 7; + + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + EARLY_ACCESS = 1; + + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + ALPHA = 2; + + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + BETA = 3; + + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + GA = 4; + + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. 
+ DEPRECATED = 5; +} diff --git a/src/rust/protos/protos/googleapis/google/bytestream/bytestream.proto b/src/rust/protos/protos/googleapis/google/bytestream/bytestream.proto index 85e386fc2b2..26bc609e6a7 100644 --- a/src/rust/protos/protos/googleapis/google/bytestream/bytestream.proto +++ b/src/rust/protos/protos/googleapis/google/bytestream/bytestream.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,14 +16,10 @@ syntax = "proto3"; package google.bytestream; -import "google/api/annotations.proto"; -import "google/protobuf/wrappers.proto"; - option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream"; option java_outer_classname = "ByteStreamProto"; option java_package = "com.google.bytestream"; - // #### Introduction // // The Byte Stream API enables a client to read and write a stream of bytes to @@ -91,7 +87,8 @@ service ByteStream { // evicted. For any sequence of `QueryWriteStatus()` calls for a given // resource name, the sequence of returned `committed_size` values will be // non-decreasing. - rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse); + rpc QueryWriteStatus(QueryWriteStatusRequest) + returns (QueryWriteStatusResponse); } // Request object for ByteStream.Read. diff --git a/src/rust/protos/protos/googleapis/google/longrunning/operations.proto b/src/rust/protos/protos/googleapis/google/longrunning/operations.proto index 2fb7a31ad7b..7e8923548e4 100644 --- a/src/rust/protos/protos/googleapis/google/longrunning/operations.proto +++ b/src/rust/protos/protos/googleapis/google/longrunning/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,41 +17,61 @@ syntax = "proto3"; package google.longrunning; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/rpc/status.proto"; option csharp_namespace = "Google.LongRunning"; -option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning"; +option go_package = "cloud.google.com/go/longrunning/autogen/longrunningpb;longrunningpb"; option java_multiple_files = true; option java_outer_classname = "OperationsProto"; option java_package = "com.google.longrunning"; +option objc_class_prefix = "GLRUN"; +// option php_namespace = "Google\\LongRunning"; // Commented out for protoc compatibility +extend google.protobuf.MethodOptions { + // Additional information regarding long-running operations. + // In particular, this specifies the types that are returned from + // long-running operations. + // + // Required for methods that return `google.longrunning.Operation`; invalid + // otherwise. + google.longrunning.OperationInfo operation_info = 1049; +} // Manages long-running operations with an API service. // // When an API method normally takes long time to complete, it can be designed -// to return [Operation][google.longrunning.Operation] to the client, and the client can use this -// interface to receive the real response asynchronously by polling the -// operation resource, or pass the operation resource to another API (such as -// Google Cloud Pub/Sub API) to receive the response. Any API service that -// returns long-running operations should implement the `Operations` interface -// so developers can have a consistent client experience. 
+// to return [Operation][google.longrunning.Operation] to the client, and the +// client can use this interface to receive the real response asynchronously by +// polling the operation resource, or pass the operation resource to another API +// (such as Pub/Sub API) to receive the response. Any API service that returns +// long-running operations should implement the `Operations` interface so +// developers can have a consistent client experience. service Operations { + option (google.api.default_host) = "longrunning.googleapis.com"; + // Lists operations that match the specified filter in the request. If the // server doesn't support this method, it returns `UNIMPLEMENTED`. - // - // NOTE: the `name` binding below allows API services to override the binding - // to use different resource name schemes, such as `users/*/operations`. rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { - option (google.api.http) = { get: "/v1/{name=operations}" }; + option (google.api.http) = { + get: "/v1/{name=operations}" + }; + option (google.api.method_signature) = "name,filter"; } // Gets the latest state of a long-running operation. Clients can use this // method to poll the operation result at intervals as recommended by the API // service. rpc GetOperation(GetOperationRequest) returns (Operation) { - option (google.api.http) = { get: "/v1/{name=operations/**}" }; + option (google.api.http) = { + get: "/v1/{name=operations/**}" + }; + option (google.api.method_signature) = "name"; } // Deletes a long-running operation. This method indicates that the client is @@ -59,7 +79,10 @@ service Operations { // operation. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. 
rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=operations/**}" }; + option (google.api.http) = { + delete: "/v1/{name=operations/**}" + }; + option (google.api.method_signature) = "name"; } // Starts asynchronous cancellation on a long-running operation. The server @@ -70,11 +93,27 @@ service Operations { // other methods to check whether the cancellation succeeded or whether the // operation completed despite cancellation. On successful cancellation, // the operation is not deleted; instead, it becomes an operation with - // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, - // corresponding to `Code.CANCELLED`. + // an [Operation.error][google.longrunning.Operation.error] value with a + // [google.rpc.Status.code][google.rpc.Status.code] of `1`, corresponding to + // `Code.CANCELLED`. rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; + option (google.api.http) = { + post: "/v1/{name=operations/**}:cancel" + body: "*" + }; + option (google.api.method_signature) = "name"; } + + // Waits until the specified long-running operation is done or reaches at most + // a specified timeout, returning the latest state. If the operation is + // already done, the latest state is immediately returned. If the timeout + // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + // timeout is used. If the server does not support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + // Note that this method is on a best-effort basis. It may return the latest + // state before the specified timeout (including immediately), meaning even an + // immediate response is no guarantee that the operation is done. 
+ rpc WaitOperation(WaitOperationRequest) returns (Operation) {} } // This resource represents a long-running operation that is the result of a @@ -82,7 +121,7 @@ service Operations { message Operation { // The server-assigned name, which is only unique within the same service that // originally returns it. If you use the default HTTP mapping, the - // `name` should have the format of `operations/some/unique/name`. + // `name` should be a resource name ending with `operations/{unique_id}`. string name = 1; // Service-specific metadata associated with the operation. It typically @@ -92,18 +131,19 @@ message Operation { google.protobuf.Any metadata = 2; // If the value is `false`, it means the operation is still in progress. - // If true, the operation is completed, and either `error` or `response` is + // If `true`, the operation is completed, and either `error` or `response` is // available. bool done = 3; // The operation result, which can be either an `error` or a valid `response`. // If `done` == `false`, neither `error` nor `response` is set. - // If `done` == `true`, exactly one of `error` or `response` is set. + // If `done` == `true`, exactly one of `error` or `response` can be set. + // Some services might not provide the result. oneof result { // The error result of the operation in case of failure or cancellation. google.rpc.Status error = 4; - // The normal response of the operation in case of success. If the original + // The normal, successful response of the operation. If the original // method returns no data on success, such as `Delete`, the response is // `google.protobuf.Empty`. If the original method is standard // `Get`/`Create`/`Update`, the response should be the resource. For other @@ -115,15 +155,17 @@ message Operation { } } -// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +// The request message for +// [Operations.GetOperation][google.longrunning.Operations.GetOperation]. 
message GetOperationRequest { // The name of the operation resource. string name = 1; } -// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// The request message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. message ListOperationsRequest { - // The name of the operation collection. + // The name of the operation's parent resource. string name = 4; // The standard list filter. @@ -134,26 +176,90 @@ message ListOperationsRequest { // The standard list page token. string page_token = 3; + + // When set to `true`, operations that are reachable are returned as normal, + // and those that are unreachable are returned in the + // [ListOperationsResponse.unreachable] field. + // + // This can only be `true` when reading across collections e.g. when `parent` + // is set to `"projects/example/locations/-"`. + // + // This field is not by default supported and will result in an + // `UNIMPLEMENTED` error if set unless explicitly documented otherwise in + // service or product specific documentation. + bool return_partial_success = 5; } -// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// The response message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. message ListOperationsResponse { // A list of operations that matches the specified filter in the request. repeated Operation operations = 1; // The standard List next-page token. string next_page_token = 2; + + // Unordered list. Unreachable resources. Populated when the request sets + // `ListOperationsRequest.return_partial_success` and reads across + // collections e.g. when attempting to list all resources across all supported + // locations. + repeated string unreachable = 3 + [(google.api.field_behavior) = UNORDERED_LIST]; } -// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. 
+// The request message for +// [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. message CancelOperationRequest { // The name of the operation resource to be cancelled. string name = 1; } -// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +// The request message for +// [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. message DeleteOperationRequest { // The name of the operation resource to be deleted. string name = 1; } +// The request message for +// [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. +message WaitOperationRequest { + // The name of the operation resource to wait on. + string name = 1; + + // The maximum duration to wait before timing out. If left blank, the wait + // will be at most the time permitted by the underlying HTTP/RPC protocol. + // If RPC context deadline is also specified, the shorter one will be used. + google.protobuf.Duration timeout = 2; +} + +// A message representing the message types used by a long-running operation. +// +// Example: +// +// rpc Export(ExportRequest) returns (google.longrunning.Operation) { +// option (google.longrunning.operation_info) = { +// response_type: "ExportResponse" +// metadata_type: "ExportMetadata" +// }; +// } +message OperationInfo { + // Required. The message name of the primary return type for this + // long-running operation. + // This type will be used to deserialize the LRO's response. + // + // If the response is in a different package from the rpc, a fully-qualified + // message name must be used (e.g. `google.protobuf.Struct`). + // + // Note: Altering this value constitutes a breaking change. + string response_type = 1; + + // Required. The message name of the metadata type for this long-running + // operation. + // + // If the response is in a different package from the rpc, a fully-qualified + // message name must be used (e.g. `google.protobuf.Struct`). 
+ // + // Note: Altering this value constitutes a breaking change. + string metadata_type = 2; +} diff --git a/src/rust/protos/protos/googleapis/google/rpc/code.proto b/src/rust/protos/protos/googleapis/google/rpc/code.proto index 8fef411705b..aa6ce153783 100644 --- a/src/rust/protos/protos/googleapis/google/rpc/code.proto +++ b/src/rust/protos/protos/googleapis/google/rpc/code.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,8 +22,7 @@ option java_outer_classname = "CodeProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; - -// The canonical error codes for Google APIs. +// The canonical error codes for gRPC APIs. // // // Sometimes multiple error codes may apply. Services should return @@ -31,7 +30,7 @@ option objc_class_prefix = "RPC"; // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. // Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. enum Code { - // Not an error; returned on success + // Not an error; returned on success. // // HTTP Mapping: 200 OK OK = 0; @@ -70,7 +69,7 @@ enum Code { // Some requested entity (e.g., file or directory) was not found. // // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, + // of users, such as gradual feature rollout or undocumented allowlist, // `NOT_FOUND` may be used. If a request is denied for some users within // a class of users, such as user-based access control, `PERMISSION_DENIED` // must be used. @@ -116,11 +115,11 @@ enum Code { // Service implementors can use the following guidelines to decide // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: // (a) Use `UNAVAILABLE` if the client can retry just the failing call. 
- // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" + // the system state has been explicitly fixed. For example, if an "rmdir" // fails because the directory is non-empty, `FAILED_PRECONDITION` // should be returned since the client should not retry unless // the files are deleted from the directory. @@ -171,7 +170,8 @@ enum Code { // The service is currently unavailable. This is most likely a // transient condition, which can be corrected by retrying with - // a backoff. + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. // // See the guidelines above for deciding between `FAILED_PRECONDITION`, // `ABORTED`, and `UNAVAILABLE`. diff --git a/src/rust/protos/protos/googleapis/google/rpc/error_details.proto b/src/rust/protos/protos/googleapis/google/rpc/error_details.proto index f24ae00999b..4f9ecff0351 100644 --- a/src/rust/protos/protos/googleapis/google/rpc/error_details.proto +++ b/src/rust/protos/protos/googleapis/google/rpc/error_details.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,6 +24,57 @@ option java_outer_classname = "ErrorDetailsProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; +// Describes the cause of the error with structured details. 
+// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +message ErrorInfo { + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + string reason = 1; + + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + string domain = 2; + + // Additional structured details about this error. + // + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. 
For example, rather than + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of + // instances that can be created in a single (batch) request. + map metadata = 3; +} // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error @@ -36,7 +87,7 @@ option objc_class_prefix = "RPC"; // receiving the error response before retrying. If retrying requests also // fail, clients should use an exponential backoff scheme to gradually increase // the delay between retries based on `retry_delay`, until either a maximum -// number of retires have been reached or a maximum retry delay cap has been +// number of retries have been reached or a maximum retry delay cap has been // reached. message RetryInfo { // Clients should wait at least this long between retrying the same request. @@ -61,7 +112,7 @@ message DebugInfo { // a service could respond with the project id and set `service_disabled` // to true. // -// Also see RetryDetail and Help types for other details about handling a +// Also see RetryInfo and Help types for other details about handling a // quota failure. message QuotaFailure { // A message type used to describe a single quota violation. For example, a @@ -80,6 +131,71 @@ message QuotaFailure { // For example: "Service disabled" or "Daily Limit for read operations // exceeded". string description = 2; + + // The API Service from which the `QuotaFailure.Violation` orginates. In + // some cases, Quota issues originate from an API Service other than the one + // that was called. In other words, a dependency of the called API Service + // could be the cause of the `QuotaFailure`, and this field would have the + // dependency API service name. 
+ // + // For example, if the called API is Kubernetes Engine API + // (container.googleapis.com), and a quota violation occurs in the + // Kubernetes Engine API itself, this field would be + // "container.googleapis.com". On the other hand, if the quota violation + // occurs when the Kubernetes Engine API creates VMs in the Compute Engine + // API (compute.googleapis.com), this field would be + // "compute.googleapis.com". + string api_service = 3; + + // The metric of the violated quota. A quota metric is a named counter to + // measure usage, such as API requests or CPUs. When an activity occurs in a + // service, such as Virtual Machine allocation, one or more quota metrics + // may be affected. + // + // For example, "compute.googleapis.com/cpus_per_vm_family", + // "storage.googleapis.com/internet_egress_bandwidth". + string quota_metric = 4; + + // The id of the violated quota. Also know as "limit name", this is the + // unique identifier of a quota in the context of an API service. + // + // For example, "CPUS-PER-VM-FAMILY-per-project-region". + string quota_id = 5; + + // The dimensions of the violated quota. Every non-global quota is enforced + // on a set of dimensions. While quota metric defines what to count, the + // dimensions specify for what aspects the counter should be increased. + // + // For example, the quota "CPUs per region per VM family" enforces a limit + // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + // "region" and "vm_family". And if the violation occurred in region + // "us-central1" and for VM family "n1", the quota_dimensions would be, + // + // { + // "region": "us-central1", + // "vm_family": "n1", + // } + // + // When a quota is enforced globally, the quota_dimensions would always be + // empty. + map quota_dimensions = 6; + + // The enforced quota value at the time of the `QuotaFailure`. 
+ // + // For example, if the enforced quota value at the time of the + // `QuotaFailure` on the number of CPUs is "10", then the value of this + // field would reflect this quantity. + int64 quota_value = 7; + + // The new quota value being rolled out at the time of the violation. At the + // completion of the rollout, this value will be enforced in place of + // quota_value. If no rollout is in progress at the time of the violation, + // this field is not set. + // + // For example, if at the time of the violation a rollout is in progress + // changing the number of CPUs quota from 10 to 20, 20 would be the value of + // this field. + optional int64 future_quota_value = 8; } // Describes all quota violations. @@ -95,13 +211,13 @@ message PreconditionFailure { // A message type used to describe a single precondition failure. message Violation { // The type of PreconditionFailure. We recommend using a service-specific - // enum type to define the supported precondition violation types. For + // enum type to define the supported precondition violation subjects. For // example, "TOS" for "Terms of Service violation". string type = 1; // The subject, relative to the type, that failed. - // For example, "google.com/cloud" relative to the "TOS" type would - // indicate which terms of service is being referenced. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. string subject = 2; // A description of how the precondition failed. Developers can use this @@ -120,13 +236,59 @@ message PreconditionFailure { message BadRequest { // A message type used to describe a single bad request field. message FieldViolation { - // A path leading to a field in the request body. The value will be a + // A path that leads to a field in the request body. The value will be a // sequence of dot-separated identifiers that identify a protocol buffer - // field. 
E.g., "field_violations.field" would identify this field. + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // * `full_name` for a violation in the `full_name` value + // * `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // * `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. + // + // In JSON, the same values are represented as: + // + // * `fullName` for a violation in the `fullName` value + // * `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // * `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. string field = 1; // A description of why the request element is bad. string description = 2; + + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + string reason = 3; + + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage localized_message = 4; } // Describes all violations in a client request. @@ -154,7 +316,8 @@ message ResourceInfo { // The name of the resource being accessed. 
For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. string resource_name = 2; // The owner of the resource (optional). @@ -191,7 +354,7 @@ message Help { // which can be attached to an RPC error. message LocalizedMessage { // The locale used following the specification defined at - // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. // Examples are: "en-US", "fr-CH", "es-MX" string locale = 1; diff --git a/src/rust/protos/protos/googleapis/google/rpc/status.proto b/src/rust/protos/protos/googleapis/google/rpc/status.proto index 0839ee9666a..dc14c9438ca 100644 --- a/src/rust/protos/protos/googleapis/google/rpc/status.proto +++ b/src/rust/protos/protos/googleapis/google/rpc/status.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,72 +18,29 @@ package google.rpc; import "google/protobuf/any.proto"; +option cc_enable_arenas = true; option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; option java_multiple_files = true; option java_outer_classname = "StatusProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; - -// The `Status` type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. It is used by -// [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error message, -// and error details. 
The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The -// error message should be a developer-facing English message that helps -// developers *understand* and *resolve* the error. If a localized user-facing -// error message is needed, put the localized message in the error details or -// localize it in the client. The optional error details may contain arbitrary -// information about the error. There is a predefined set of error detail types -// in the package `google.rpc` that can be used for common error conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. 
+// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. // -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). message Status { - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. int32 code = 1; // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. string message = 2; // A list of messages that carry the error details. 
There is a common set of diff --git a/src/rust/remote_provider/remote_provider_reapi/src/byte_store.rs b/src/rust/remote_provider/remote_provider_reapi/src/byte_store.rs index 0cc50fce509..327e0862068 100644 --- a/src/rust/remote_provider/remote_provider_reapi/src/byte_store.rs +++ b/src/rust/remote_provider/remote_provider_reapi/src/byte_store.rs @@ -125,6 +125,7 @@ impl Provider { data: bytes, compressor: remexec::compressor::Value::Identity as i32, }], + digest_function: remexec::digest_function::Value::Unknown as i32, }; let mut client = self.cas_client.as_ref().clone(); @@ -509,6 +510,7 @@ impl ByteStoreProvider for Provider { instance_name: self.instance_name.as_ref().cloned().unwrap_or_default(), digests: digests, acceptable_compressors: vec![], + digest_function: remexec::digest_function::Value::Unknown as i32, }; let client = client.clone(); @@ -596,6 +598,7 @@ impl ByteStoreProvider for Provider { remexec::FindMissingBlobsRequest { instance_name: self.instance_name.as_ref().cloned().unwrap_or_default(), blob_digests: digests.to_vec(), + digest_function: remexec::digest_function::Value::Unknown as i32, } }); @@ -654,6 +657,7 @@ mod tests { use crate::remexec::FindMissingBlobsRequest; use prost::Message; use protos::pb::build::bazel::remote::execution::v2; + use protos::pb::build::bazel::remote::execution::v2 as remexec; use protos::pb::google::rpc; use testutil::data::TestData; @@ -664,12 +668,14 @@ mod tests { let small_request = FindMissingBlobsRequest { instance_name: instance_name.to_string(), blob_digests: vec![TestData::catnip().digest().into()], + digest_function: remexec::digest_function::Value::Unknown as i32, }; assert_eq!(small_request.encoded_len(), 70); let medium_request = FindMissingBlobsRequest { instance_name: instance_name.to_string(), blob_digests: vec![TestData::all_the_henries().digest().into()], + digest_function: remexec::digest_function::Value::Unknown as i32, }; assert_eq!(medium_request.encoded_len(), 72); @@ -679,6 +685,7 @@ mod 
tests { let large_request = FindMissingBlobsRequest { instance_name: instance_name.to_string(), blob_digests: vec![big_blob.into()], + digest_function: remexec::digest_function::Value::Unknown as i32, }; assert_eq!(large_request.encoded_len(), 74); @@ -688,6 +695,7 @@ mod tests { hash: big_blob.hash.to_string(), size_bytes: i64::MAX, }], + digest_function: remexec::digest_function::Value::Unknown as i32, }; assert_eq!(max_request.encoded_len(), 78); } @@ -785,6 +793,7 @@ mod tests { let request = FindMissingBlobsRequest { instance_name: instance_name.to_string(), blob_digests: blobs.clone(), + digest_function: remexec::digest_function::Value::Unknown as i32, }; let size = request.encoded_len(); diff --git a/src/rust/testutil/mock/src/cas_service.rs b/src/rust/testutil/mock/src/cas_service.rs index 19cf17d1923..4b8e7131371 100644 --- a/src/rust/testutil/mock/src/cas_service.rs +++ b/src/rust/testutil/mock/src/cas_service.rs @@ -521,6 +521,24 @@ impl ContentAddressableStorage for StubCASResponder { ) -> Result, Status> { Err(Status::unimplemented("".to_owned())) } + + async fn split_blob( + &self, + _: Request, + ) -> Result, Status> { + Err(Status::unimplemented( + "split_blob not implemented in mock".to_owned(), + )) + } + + async fn splice_blob( + &self, + _: Request, + ) -> Result, Status> { + Err(Status::unimplemented( + "splice_blob not implemented in mock".to_owned(), + )) + } } #[tonic::async_trait] diff --git a/src/rust/testutil/mock/src/execution_server.rs b/src/rust/testutil/mock/src/execution_server.rs index 97064901119..833985f7c70 100644 --- a/src/rust/testutil/mock/src/execution_server.rs +++ b/src/rust/testutil/mock/src/execution_server.rs @@ -18,7 +18,7 @@ use protos::pb::build::bazel::remote::execution::v2 as remexec; use protos::pb::build::bazel::semver::SemVer; use protos::pb::google::longrunning::{ CancelOperationRequest, DeleteOperationRequest, GetOperationRequest, ListOperationsRequest, - ListOperationsResponse, Operation, 
operations_server::Operations, + ListOperationsResponse, Operation, WaitOperationRequest, operations_server::Operations, operations_server::OperationsServer, }; use protos::require_digest; @@ -424,6 +424,15 @@ impl Operations for MockResponder { self.cancelation_requests.lock().push(request); Ok(Response::new(())) } + + async fn wait_operation( + &self, + _: Request, + ) -> Result, Status> { + Err(Status::unimplemented( + "wait_operation not implemented in mock".to_owned(), + )) + } } #[tonic::async_trait]