Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 73 additions & 10 deletions components/clarinet-deployments/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,7 @@ pub fn setup_session_with_deployment(
success,
session,
analysis: contracts_analysis,
ast_metadata: HashMap::new(), // Not populated in this code path
}
}

Expand Down Expand Up @@ -276,11 +277,39 @@ fn handle_emulated_contract_call(
result
}

/// Cached AST data for a contract, used for IDE performance optimization.
///
/// Stored per contract file so that deployment generation can skip
/// re-parsing a contract whose source and build settings have not changed
/// since the AST was last built.
#[derive(Debug, Clone)]
pub struct CachedContractASTData {
    /// Hash of the contract source when the AST was built; compared against
    /// a freshly computed hash to detect source changes and invalidate the entry.
    pub content_hash: u64,
    /// The parsed AST being cached.
    pub ast: ContractAST,
    /// Clarity version the AST was built with; a mismatch invalidates the entry.
    pub clarity_version: clarity_repl::clarity::ClarityVersion,
    /// Epoch the AST was built for; a mismatch invalidates the entry.
    pub epoch: StacksEpochId,
}

/// Compute a hash of the contract source code for cache validation.
///
/// Relies on the standard library's `DefaultHasher`, whose output is
/// deterministic within a given build of the program — sufficient for
/// in-process cache invalidation (it is not guaranteed stable across
/// Rust toolchain versions).
pub fn compute_content_hash(source: &str) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut state = DefaultHasher::new();
    Hash::hash(source, &mut state);
    Hasher::finish(&state)
}

/// Generate the default deployment plan and its artifacts for the given network.
///
/// Convenience wrapper around `generate_default_deployment_with_cache` that
/// runs without any cached ASTs, so every contract AST is built from scratch.
///
/// * `manifest` - the project manifest describing the contracts to deploy.
/// * `network` - the target Stacks network for the plan.
/// * `no_batch` - forwarded to the cached variant unchanged.
/// * `file_accessor` - optional abstraction for reading project files.
pub async fn generate_default_deployment(
    manifest: &ProjectManifest,
    network: &StacksNetwork,
    no_batch: bool,
    file_accessor: Option<&dyn FileAccessor>,
) -> Result<(DeploymentSpecification, DeploymentGenerationArtifacts), String> {
    // `None` disables AST cache reuse for this code path.
    generate_default_deployment_with_cache(manifest, network, no_batch, file_accessor, None).await
}

pub async fn generate_default_deployment_with_cache(
manifest: &ProjectManifest,
network: &StacksNetwork,
no_batch: bool,
file_accessor: Option<&dyn FileAccessor>,
cached_asts: Option<&HashMap<FileLocation, CachedContractASTData>>,
) -> Result<(DeploymentSpecification, DeploymentGenerationArtifacts), String> {
let network_manifest = match file_accessor {
None => NetworkManifest::from_project_manifest_location(
Expand Down Expand Up @@ -793,13 +822,16 @@ pub async fn generate_default_deployment(

contracts_sources.insert(
contract_id.clone(),
ClarityContract {
code_source: ClarityCodeSource::ContractInMemory(source.clone()),
deployer: ContractDeployer::Address(sender.to_address()),
name: contract_name.to_string(),
clarity_version: contract_config.clarity_version,
epoch: clarity_repl::repl::Epoch::Specific(epoch),
},
(
ClarityContract {
code_source: ClarityCodeSource::ContractInMemory(source.clone()),
deployer: ContractDeployer::Address(sender.to_address()),
name: contract_name.to_string(),
clarity_version: contract_config.clarity_version,
epoch: clarity_repl::repl::Epoch::Specific(epoch),
},
contract_location.clone(),
),
);

let contract_spec = if matches!(network, StacksNetwork::Simnet) {
Expand Down Expand Up @@ -831,13 +863,43 @@ pub async fn generate_default_deployment(

let mut contract_asts = BTreeMap::new();
let mut contract_data = BTreeMap::new();
let mut ast_metadata = HashMap::new();

for (contract_id, (contract, contract_location)) in contracts_sources.into_iter() {
let source = contract.expect_in_memory_code_source();
let content_hash = compute_content_hash(source);
let resolved_epoch = contract.epoch.resolve();

// Check if we have a valid cached AST
let (ast, diags, ast_success) = if let Some(cached) = cached_asts
.and_then(|cache| cache.get(&contract_location))
.filter(|c| {
c.content_hash == content_hash
&& c.clarity_version == contract.clarity_version
&& c.epoch == resolved_epoch
}) {
// Reuse cached AST - no diagnostics since it was already validated
(cached.ast.clone(), vec![], true)
} else {
// Build new AST
session.interpreter.build_ast(&contract)
};

// Track metadata for caching
ast_metadata.insert(
contract_id.clone(),
types::ContractASTMetadata {
location: contract_location,
content_hash,
clarity_version: contract.clarity_version,
epoch: resolved_epoch,
},
);

for (contract_id, contract) in contracts_sources.into_iter() {
let (ast, diags, ast_success) = session.interpreter.build_ast(&contract);
contract_asts.insert(contract_id.clone(), ast.clone());
contract_data.insert(contract_id.clone(), (contract.clarity_version, ast));
contract_diags.insert(contract_id.clone(), diags);
contract_epochs.insert(contract_id, contract.epoch.resolve());
contract_epochs.insert(contract_id, resolved_epoch);
asts_success = asts_success && ast_success;
}

Expand Down Expand Up @@ -988,6 +1050,7 @@ pub async fn generate_default_deployment(
results_values: HashMap::new(),
analysis: HashMap::new(),
session,
ast_metadata,
};

Ok((deployment, artifacts))
Expand Down
11 changes: 11 additions & 0 deletions components/clarinet-deployments/src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,15 @@ fn try_clarity_version_from_option(value: Option<u8>) -> Result<ClarityVersion,
}
}

/// Metadata for caching ASTs in the LSP.
///
/// Recorded per contract during deployment generation so a later run can
/// decide whether a previously built AST is still valid for that contract.
#[derive(Clone, Debug)]
pub struct ContractASTMetadata {
    /// Location of the contract's source file.
    pub location: FileLocation,
    /// Hash of the contract source at the time the AST was built.
    pub content_hash: u64,
    /// Clarity version the AST was built with.
    pub clarity_version: ClarityVersion,
    /// Epoch the AST was built for.
    pub epoch: StacksEpochId,
}

#[derive(Clone)]
pub struct DeploymentGenerationArtifacts {
pub asts: BTreeMap<QualifiedContractIdentifier, ContractAST>,
Expand All @@ -147,6 +156,8 @@ pub struct DeploymentGenerationArtifacts {
pub results_values: HashMap<QualifiedContractIdentifier, Option<Value>>,
pub session: Session,
pub success: bool,
/// Metadata for AST caching (location, content hash, version info per contract)
pub ast_metadata: HashMap<QualifiedContractIdentifier, ContractASTMetadata>,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
Expand Down
64 changes: 48 additions & 16 deletions components/clarity-lsp/src/common/backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,10 +90,15 @@ pub async fn process_notification(

// With this manifest_location, let's initialize our state.
let mut protocol_state = ProtocolState::new();
match build_state(&manifest_location, &mut protocol_state, file_accessor).await {
Ok(_) => {
editor_state
.try_write(|es| es.index_protocol(manifest_location, protocol_state))?;
match build_state(&manifest_location, &mut protocol_state, file_accessor, None).await {
Ok(new_cache_entries) => {
editor_state.try_write(|es| {
es.index_protocol(manifest_location, protocol_state);
// Populate the cache with the newly built ASTs
for (loc, cached) in new_cache_entries {
es.cache_ast(loc, cached);
}
})?;
let (aggregated_diagnostics, notification) =
editor_state.try_read(|es| es.get_aggregated_diagnostics())?;
Ok(LspNotificationResponse {
Expand All @@ -106,12 +111,20 @@ pub async fn process_notification(
}

LspNotification::ManifestSaved(manifest_location) => {
// We will rebuild the entire state, without trying any optimizations for now
// Clear the AST cache when manifest changes (clarity version, epoch settings may have changed)
editor_state.try_write(|es| es.clear_ast_cache())?;

// We will rebuild the entire state without cache
let mut protocol_state = ProtocolState::new();
match build_state(&manifest_location, &mut protocol_state, file_accessor).await {
Ok(_) => {
editor_state
.try_write(|es| es.index_protocol(manifest_location, protocol_state))?;
match build_state(&manifest_location, &mut protocol_state, file_accessor, None).await {
Ok(new_cache_entries) => {
editor_state.try_write(|es| {
es.index_protocol(manifest_location, protocol_state);
// Populate the cache with the newly built ASTs
for (loc, cached) in new_cache_entries {
es.cache_ast(loc, cached);
}
})?;
let (aggregated_diagnostics, notification) =
editor_state.try_read(|es| es.get_aggregated_diagnostics())?;
Ok(LspNotificationResponse {
Expand Down Expand Up @@ -219,10 +232,15 @@ pub async fn process_notification(
}

let mut protocol_state = ProtocolState::new();
match build_state(&manifest_location, &mut protocol_state, file_accessor).await {
Ok(_) => {
editor_state
.try_write(|es| es.index_protocol(manifest_location, protocol_state))?;
match build_state(&manifest_location, &mut protocol_state, file_accessor, None).await {
Ok(new_cache_entries) => {
editor_state.try_write(|es| {
es.index_protocol(manifest_location, protocol_state);
// Populate the cache with the newly built ASTs
for (loc, cached) in new_cache_entries {
es.cache_ast(loc, cached);
}
})?;
let (aggregated_diagnostics, notification) =
editor_state.try_read(|es| es.get_aggregated_diagnostics())?;
Ok(LspNotificationResponse {
Expand All @@ -235,6 +253,9 @@ pub async fn process_notification(
}

LspNotification::ContractSaved(contract_location) => {
// Get cached ASTs from editor state before clearing the protocol
let cached_asts = editor_state.try_read(|es| es.ast_cache.clone())?;

let manifest_location = match editor_state
.try_write(|es| es.clear_protocol_associated_with_contract(&contract_location))?
{
Expand All @@ -246,12 +267,23 @@ pub async fn process_notification(
}
};

// TODO(): introduce partial analysis #604
// Use cached ASTs for contracts that haven't changed
let mut protocol_state = ProtocolState::new();
match build_state(&manifest_location, &mut protocol_state, file_accessor).await {
Ok(_) => {
match build_state(
&manifest_location,
&mut protocol_state,
file_accessor,
Some(&cached_asts),
)
.await
{
Ok(new_cache_entries) => {
editor_state.try_write(|es| {
es.index_protocol(manifest_location, protocol_state);
// Update cache with new entries
for (loc, cached) in new_cache_entries {
es.cache_ast(loc, cached);
}
if let Some(contract) = es.active_contracts.get_mut(&contract_location) {
contract.update_definitions();
};
Expand Down
Loading