diff --git a/.gitignore b/.gitignore index dcfcfbf831..86b3384ac1 100644 --- a/.gitignore +++ b/.gitignore @@ -62,3 +62,5 @@ devenv/env-out.toml .cursorrules .claude/ operations-gen +.agents/ +AGENTS.md diff --git a/chains/evm/deployment/v1_6_0/changesets/migrate_hybrid_pool_remote.go b/chains/evm/deployment/v1_6_0/changesets/migrate_hybrid_pool_remote.go new file mode 100644 index 0000000000..5dfc7777c0 --- /dev/null +++ b/chains/evm/deployment/v1_6_0/changesets/migrate_hybrid_pool_remote.go @@ -0,0 +1,480 @@ +package changesets + +import ( + "bytes" + "fmt" + + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + chain_selectors "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/chainlink-evm/gethwrappers/shared/generated/initial/erc20" + + burn_mint_with_external_minter_token_pool_bindings "github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/burn_mint_with_external_minter_token_pool" + hybrid_with_external_minter_token_pool_bindings "github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/hybrid_with_external_minter_token_pool" + evm_utils "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/utils" + evm_datastore_utils "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/utils/datastore" + evm_contract "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/utils/operations/contract" + tar_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_5_0/operations/token_admin_registry" + v1_5_1_lock_release_token_pool_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_5_1/operations/lock_release_token_pool" + v1_6_0_burn_mint_with_external_minter_token_pool_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/burn_mint_with_external_minter_token_pool" + v1_6_0_hybrid_pool_ops 
"github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool" + v1_6_0_sequences "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/sequences" + v1_5_1_lock_release_token_pool_bindings "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_5_1/lock_release_token_pool" + "github.com/smartcontractkit/chainlink-ccip/deployment/utils/changesets" + datastore_utils "github.com/smartcontractkit/chainlink-ccip/deployment/utils/datastore" + "github.com/smartcontractkit/chainlink-ccip/deployment/utils/mcms" + cldf_datastore "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + cldf "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + cldf_ops "github.com/smartcontractkit/chainlink-deployments-framework/operations" +) + +type MigrateHybridPoolRemoteConfig struct { + HubChainSelector uint64 `json:"hubChainSelector" yaml:"hubChainSelector"` + HubPoolAddress common.Address `json:"hubPoolAddress" yaml:"hubPoolAddress"` + RemoteChainSelector uint64 `json:"remoteChainSelector" yaml:"remoteChainSelector"` + NewRemotePoolAddress common.Address `json:"newRemotePoolAddress" yaml:"newRemotePoolAddress"` + OldRemotePoolAddress common.Address `json:"oldRemotePoolAddress" yaml:"oldRemotePoolAddress"` + TargetGroup uint8 `json:"targetGroup" yaml:"targetGroup"` + RemoteTokenAddress common.Address `json:"remoteTokenAddress" yaml:"remoteTokenAddress"` + MCMS mcms.Input `json:"mcms,omitempty" yaml:"mcms,omitempty"` +} + +func MigrateHybridPoolRemote(mcmsRegistry *changesets.MCMSReaderRegistry) cldf.ChangeSetV2[MigrateHybridPoolRemoteConfig] { + return cldf.CreateChangeSet( + makeApplyMigrateHybridPoolRemote(mcmsRegistry), + makeVerifyMigrateHybridPoolRemote(mcmsRegistry), + ) +} + +func makeVerifyMigrateHybridPoolRemote( + mcmsRegistry *changesets.MCMSReaderRegistry, +) func(cldf.Environment, MigrateHybridPoolRemoteConfig) error { + return func(e 
cldf.Environment, cfg MigrateHybridPoolRemoteConfig) error { + if err := cfg.MCMS.Validate(); err != nil { + return fmt.Errorf("invalid MCMS config: %w", err) + } + if cfg.HubChainSelector == cfg.RemoteChainSelector { + return fmt.Errorf("hub chain selector and remote chain selector must be different") + } + if cfg.TargetGroup != 0 && cfg.TargetGroup != 1 { + return fmt.Errorf("target group must be 0 or 1, got %d", cfg.TargetGroup) + } + if cfg.HubPoolAddress == (common.Address{}) { + return fmt.Errorf("hub pool address cannot be the zero address") + } + if cfg.NewRemotePoolAddress == (common.Address{}) { + return fmt.Errorf("new remote pool address cannot be the zero address") + } + if cfg.OldRemotePoolAddress == (common.Address{}) { + return fmt.Errorf("old remote pool address cannot be the zero address") + } + if cfg.OldRemotePoolAddress == cfg.NewRemotePoolAddress { + return fmt.Errorf("old remote pool address and new remote pool address must be different") + } + if cfg.RemoteTokenAddress == (common.Address{}) { + return fmt.Errorf("remote token address cannot be the zero address") + } + + hubChain, ok := e.BlockChains.EVMChains()[cfg.HubChainSelector] + if !ok { + return fmt.Errorf("hub chain selector %d is not configured as an EVM chain", cfg.HubChainSelector) + } + remoteChain, ok := e.BlockChains.EVMChains()[cfg.RemoteChainSelector] + if !ok { + return fmt.Errorf("remote chain selector %d is not configured as an EVM chain", cfg.RemoteChainSelector) + } + + remoteTARAddress, err := resolveRemoteTARAddress(e.DataStore, cfg.RemoteChainSelector) + if err != nil { + return err + } + + if mcmsRegistry == nil { + return fmt.Errorf("no MCMS reader registry configured") + } + mcmsReader, ok := mcmsRegistry.GetMCMSReader(chain_selectors.FamilyEVM) + if !ok { + return fmt.Errorf("no MCMS reader registered for chain family '%s'", chain_selectors.FamilyEVM) + } + + hubTimelockRef, err := mcmsReader.GetTimelockRef(e, cfg.HubChainSelector, cfg.MCMS) + if err != nil { + 
return fmt.Errorf("failed to resolve timelock for hub chain %d with qualifier %s: %w", cfg.HubChainSelector, cfg.MCMS.Qualifier, err) + } + if hubTimelockRef.Address == "" { + return fmt.Errorf("missing timelock for hub chain %d with qualifier %s", cfg.HubChainSelector, cfg.MCMS.Qualifier) + } + if !common.IsHexAddress(hubTimelockRef.Address) { + return fmt.Errorf("invalid timelock address for hub chain %d with qualifier %s: %q", cfg.HubChainSelector, cfg.MCMS.Qualifier, hubTimelockRef.Address) + } + hubMCMSRef, err := mcmsReader.GetMCMSRef(e, cfg.HubChainSelector, cfg.MCMS) + if err != nil { + return fmt.Errorf("failed to resolve MCMS for hub chain %d with qualifier %s: %w", cfg.HubChainSelector, cfg.MCMS.Qualifier, err) + } + if hubMCMSRef.Address == "" { + return fmt.Errorf("missing MCMS for hub chain %d with qualifier %s", cfg.HubChainSelector, cfg.MCMS.Qualifier) + } + if !common.IsHexAddress(hubMCMSRef.Address) { + return fmt.Errorf("invalid MCMS address for hub chain %d with qualifier %s: %q", cfg.HubChainSelector, cfg.MCMS.Qualifier, hubMCMSRef.Address) + } + + remoteTimelockRef, err := mcmsReader.GetTimelockRef(e, cfg.RemoteChainSelector, cfg.MCMS) + if err != nil { + return fmt.Errorf("failed to resolve timelock for remote chain %d with qualifier %s: %w", cfg.RemoteChainSelector, cfg.MCMS.Qualifier, err) + } + if remoteTimelockRef.Address == "" { + return fmt.Errorf("missing timelock for remote chain %d with qualifier %s", cfg.RemoteChainSelector, cfg.MCMS.Qualifier) + } + if !common.IsHexAddress(remoteTimelockRef.Address) { + return fmt.Errorf("invalid timelock address for remote chain %d with qualifier %s: %q", cfg.RemoteChainSelector, cfg.MCMS.Qualifier, remoteTimelockRef.Address) + } + remoteMCMSRef, err := mcmsReader.GetMCMSRef(e, cfg.RemoteChainSelector, cfg.MCMS) + if err != nil { + return fmt.Errorf("failed to resolve MCMS for remote chain %d with qualifier %s: %w", cfg.RemoteChainSelector, cfg.MCMS.Qualifier, err) + } + if remoteMCMSRef.Address 
== "" { + return fmt.Errorf("missing MCMS for remote chain %d with qualifier %s", cfg.RemoteChainSelector, cfg.MCMS.Qualifier) + } + if !common.IsHexAddress(remoteMCMSRef.Address) { + return fmt.Errorf("invalid MCMS address for remote chain %d with qualifier %s: %q", cfg.RemoteChainSelector, cfg.MCMS.Qualifier, remoteMCMSRef.Address) + } + + if err := verifyTypeAndVersion( + e.DataStore, cfg.HubChainSelector, cfg.HubPoolAddress, + cldf_datastore.ContractType(v1_6_0_hybrid_pool_ops.ContractType), v1_6_0_hybrid_pool_ops.Version, + hubChain.Client, "hub pool", + ); err != nil { + return err + } + if err := verifyTypeAndVersion( + e.DataStore, cfg.RemoteChainSelector, cfg.OldRemotePoolAddress, + cldf_datastore.ContractType(v1_5_1_lock_release_token_pool_ops.ContractType), v1_5_1_lock_release_token_pool_ops.Version, + remoteChain.Client, "old remote pool", + ); err != nil { + return err + } + if err := verifyTypeAndVersion( + e.DataStore, cfg.RemoteChainSelector, cfg.NewRemotePoolAddress, + cldf_datastore.ContractType(v1_6_0_burn_mint_with_external_minter_token_pool_ops.ContractType), v1_6_0_burn_mint_with_external_minter_token_pool_ops.Version, + remoteChain.Client, "new remote pool", + ); err != nil { + return err + } + + oldPool, err := v1_5_1_lock_release_token_pool_bindings.NewLockReleaseTokenPool(cfg.OldRemotePoolAddress, remoteChain.Client) + if err != nil { + return fmt.Errorf("failed to bind old remote pool %s on chain %d: %w", cfg.OldRemotePoolAddress, cfg.RemoteChainSelector, err) + } + oldPoolToken, err := oldPool.GetToken(&bind.CallOpts{Context: e.GetContext()}) + if err != nil { + return fmt.Errorf("failed to read token from old remote pool %s on chain %d: %w", cfg.OldRemotePoolAddress, cfg.RemoteChainSelector, err) + } + if oldPoolToken != cfg.RemoteTokenAddress { + return fmt.Errorf("old remote pool token %s does not match remote token %s", oldPoolToken, cfg.RemoteTokenAddress) + } + + newPool, err := 
burn_mint_with_external_minter_token_pool_bindings.NewBurnMintWithExternalMinterTokenPool(cfg.NewRemotePoolAddress, remoteChain.Client) + if err != nil { + return fmt.Errorf("failed to bind new remote pool %s on chain %d: %w", cfg.NewRemotePoolAddress, cfg.RemoteChainSelector, err) + } + newPoolToken, err := newPool.GetToken(&bind.CallOpts{Context: e.GetContext()}) + if err != nil { + return fmt.Errorf("failed to read token from new remote pool %s on chain %d: %w", cfg.NewRemotePoolAddress, cfg.RemoteChainSelector, err) + } + if newPoolToken != cfg.RemoteTokenAddress { + return fmt.Errorf("new remote pool token %s does not match remote token %s", newPoolToken, cfg.RemoteTokenAddress) + } + + hubPool, err := hybrid_with_external_minter_token_pool_bindings.NewHybridWithExternalMinterTokenPool(cfg.HubPoolAddress, hubChain.Client) + if err != nil { + return fmt.Errorf("failed to bind hub pool %s on chain %d: %w", cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + isSupportedChain, err := hubPool.IsSupportedChain(&bind.CallOpts{Context: e.GetContext()}, cfg.RemoteChainSelector) + if err != nil { + return fmt.Errorf("failed to read supported-chain status for remote chain %d from hub pool %s on chain %d: %w", cfg.RemoteChainSelector, cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + if !isSupportedChain { + return fmt.Errorf("remote chain %d is not supported on hub pool %s on chain %d", cfg.RemoteChainSelector, cfg.HubPoolAddress, cfg.HubChainSelector) + } + + hubRemotePools, err := hubPool.GetRemotePools(&bind.CallOpts{Context: e.GetContext()}, cfg.RemoteChainSelector) + if err != nil { + return fmt.Errorf("failed to read hub remote pools for remote chain %d from hub pool %s on chain %d: %w", cfg.RemoteChainSelector, cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + oldPoolBytes := common.LeftPadBytes(cfg.OldRemotePoolAddress.Bytes(), 32) + newPoolBytes := common.LeftPadBytes(cfg.NewRemotePoolAddress.Bytes(), 32) + oldPoolPresent := false + newPoolPresent := 
false + for _, remotePool := range hubRemotePools { + switch { + case bytes.Equal(remotePool, oldPoolBytes): + oldPoolPresent = true + case bytes.Equal(remotePool, newPoolBytes): + newPoolPresent = true + default: + return fmt.Errorf("unexpected pool %x in hub remote pool set for chain %d", remotePool, cfg.RemoteChainSelector) + } + } + if !oldPoolPresent && !newPoolPresent { + return fmt.Errorf("neither old pool %s nor new pool %s registered in hub remote pool set for chain %d", cfg.OldRemotePoolAddress, cfg.NewRemotePoolAddress, cfg.RemoteChainSelector) + } + + hubRemoteToken, err := hubPool.GetRemoteToken(&bind.CallOpts{Context: e.GetContext()}, cfg.RemoteChainSelector) + if err != nil { + return fmt.Errorf("failed to read hub remote token bytes for remote chain %d from hub pool %s on chain %d: %w", cfg.RemoteChainSelector, cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + if !bytesAddressMatches(hubRemoteToken, cfg.RemoteTokenAddress) { + return fmt.Errorf( + "hub remote token bytes %x do not match remote token %s for chain %d", + hubRemoteToken, + cfg.RemoteTokenAddress, + cfg.RemoteChainSelector, + ) + } + + hubPoolOwner, err := hubPool.Owner(&bind.CallOpts{Context: e.GetContext()}) + if err != nil { + return fmt.Errorf("failed to read owner for hub pool %s on chain %d: %w", cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + hubTimelockAddress := common.HexToAddress(hubTimelockRef.Address) + if hubPoolOwner != hubTimelockAddress { + return fmt.Errorf( + "hub pool %s owner %s does not match timelock %s on chain %d", + cfg.HubPoolAddress, + hubPoolOwner, + hubTimelockAddress, + cfg.HubChainSelector, + ) + } + + tarConfigReport, err := cldf_ops.ExecuteOperation(e.OperationsBundle, tar_ops.GetTokenConfig, remoteChain, evm_contract.FunctionInput[common.Address]{ + ChainSelector: cfg.RemoteChainSelector, + Address: remoteTARAddress, + Args: cfg.RemoteTokenAddress, + }) + if err != nil { + return fmt.Errorf("failed to read TAR token config for token %s on chain 
%d: %w", cfg.RemoteTokenAddress, cfg.RemoteChainSelector, err) + } + remoteTimelockAddress := common.HexToAddress(remoteTimelockRef.Address) + if tarConfigReport.Output.Administrator != remoteTimelockAddress { + return fmt.Errorf( + "TAR administrator %s for token %s does not match timelock %s on chain %d", + tarConfigReport.Output.Administrator, + cfg.RemoteTokenAddress, + remoteTimelockAddress, + cfg.RemoteChainSelector, + ) + } + tarPool := tarConfigReport.Output.TokenPool + if tarPool == (common.Address{}) { + return fmt.Errorf("TAR has no pool set for token %s on chain %d", cfg.RemoteTokenAddress, cfg.RemoteChainSelector) + } + if tarPool != cfg.OldRemotePoolAddress && tarPool != cfg.NewRemotePoolAddress { + return fmt.Errorf( + "TAR pool %s for token %s on chain %d is neither old pool %s nor new pool %s", + tarPool, + cfg.RemoteTokenAddress, + cfg.RemoteChainSelector, + cfg.OldRemotePoolAddress, + cfg.NewRemotePoolAddress, + ) + } + + remoteToken, err := erc20.NewERC20(cfg.RemoteTokenAddress, remoteChain.Client) + if err != nil { + return fmt.Errorf("failed to bind remote token %s on chain %d: %w", cfg.RemoteTokenAddress, cfg.RemoteChainSelector, err) + } + totalSupply, err := remoteToken.TotalSupply(&bind.CallOpts{Context: e.GetContext()}) + if err != nil { + return fmt.Errorf("failed to read totalSupply for remote token %s on chain %d: %w", cfg.RemoteTokenAddress, cfg.RemoteChainSelector, err) + } + + if cfg.TargetGroup == 1 { + lockedTokensReport, err := cldf_ops.ExecuteOperation(e.OperationsBundle, v1_6_0_hybrid_pool_ops.GetLockedTokens, hubChain, evm_contract.FunctionInput[struct{}]{ + ChainSelector: cfg.HubChainSelector, + Address: cfg.HubPoolAddress, + Args: struct{}{}, + }) + if err != nil { + return fmt.Errorf("failed to read locked token accounting from hub pool %s on chain %d: %w", cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + if lockedTokensReport.Output == nil { + return fmt.Errorf("hub pool %s returned nil locked token accounting on chain 
%d", cfg.HubPoolAddress, cfg.HubChainSelector) + } + if totalSupply.Cmp(lockedTokensReport.Output) > 0 { + return fmt.Errorf( + "remote token totalSupply %s exceeds locked token accounting %s for hub pool %s on chain %d", + totalSupply.String(), + lockedTokensReport.Output.String(), + cfg.HubPoolAddress, + cfg.HubChainSelector, + ) + } + } + + return nil + } +} + +func makeApplyMigrateHybridPoolRemote( + mcmsRegistry *changesets.MCMSReaderRegistry, +) func(cldf.Environment, MigrateHybridPoolRemoteConfig) (cldf.ChangesetOutput, error) { + return func(e cldf.Environment, cfg MigrateHybridPoolRemoteConfig) (cldf.ChangesetOutput, error) { + remoteTARAddress, err := resolveRemoteTARAddress(e.DataStore, cfg.RemoteChainSelector) + if err != nil { + return cldf.ChangesetOutput{}, err + } + + hubChain, ok := e.BlockChains.EVMChains()[cfg.HubChainSelector] + if !ok { + return cldf.ChangesetOutput{}, fmt.Errorf("hub chain selector %d is not configured as an EVM chain", cfg.HubChainSelector) + } + remoteChain, ok := e.BlockChains.EVMChains()[cfg.RemoteChainSelector] + if !ok { + return cldf.ChangesetOutput{}, fmt.Errorf("remote chain selector %d is not configured as an EVM chain", cfg.RemoteChainSelector) + } + + hubPool, err := hybrid_with_external_minter_token_pool_bindings.NewHybridWithExternalMinterTokenPool(cfg.HubPoolAddress, hubChain.Client) + if err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to bind hub pool %s on chain %d: %w", cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + hubTokenAddress, err := hubPool.GetToken(&bind.CallOpts{Context: e.GetContext()}) + if err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to read token from hub pool %s on chain %d: %w", cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + + remoteToken, err := erc20.NewERC20(cfg.RemoteTokenAddress, remoteChain.Client) + if err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to bind remote token %s on chain %d: %w", cfg.RemoteTokenAddress, 
cfg.RemoteChainSelector, err) + } + totalSupply, err := remoteToken.TotalSupply(&bind.CallOpts{Context: e.GetContext()}) + if err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to read totalSupply for remote token %s on chain %d: %w", cfg.RemoteTokenAddress, cfg.RemoteChainSelector, err) + } + + report, err := cldf_ops.ExecuteSequence(e.OperationsBundle, v1_6_0_sequences.MigrateHybridPoolRemote, e.BlockChains, v1_6_0_sequences.MigrateHybridPoolRemoteInput{ + HubChainSelector: cfg.HubChainSelector, + HubPoolAddress: cfg.HubPoolAddress, + RemoteChainSelector: cfg.RemoteChainSelector, + NewRemotePoolAddress: cfg.NewRemotePoolAddress, + OldRemotePoolAddress: cfg.OldRemotePoolAddress, + RemoteChainSupply: totalSupply, + TargetGroup: cfg.TargetGroup, + RemoteTARAddress: remoteTARAddress, + RemoteTokenAddress: cfg.RemoteTokenAddress, + }) + if err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to migrate token pool: %w", err) + } + + ds := cldf_datastore.NewMemoryDataStore() + if !AddressRefExistsWithTypeVersion( + e.DataStore, + cfg.HubChainSelector, + cfg.HubPoolAddress, + cldf_datastore.ContractType(v1_6_0_hybrid_pool_ops.ContractType), + v1_6_0_hybrid_pool_ops.Version, + ) { + if err := ds.Addresses().Add(cldf_datastore.AddressRef{ + ChainSelector: cfg.HubChainSelector, + Type: cldf_datastore.ContractType(v1_6_0_hybrid_pool_ops.ContractType), + Version: v1_6_0_hybrid_pool_ops.Version, + Address: cfg.HubPoolAddress.Hex(), + Qualifier: hubTokenAddress.Hex(), + }); err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to persist hub pool ref %s on chain %d: %w", cfg.HubPoolAddress, cfg.HubChainSelector, err) + } + } + if !AddressRefExistsWithTypeVersion( + e.DataStore, + cfg.RemoteChainSelector, + cfg.NewRemotePoolAddress, + cldf_datastore.ContractType(v1_6_0_burn_mint_with_external_minter_token_pool_ops.ContractType), + v1_6_0_burn_mint_with_external_minter_token_pool_ops.Version, + ) { + if err := 
ds.Addresses().Add(cldf_datastore.AddressRef{ + ChainSelector: cfg.RemoteChainSelector, + Type: cldf_datastore.ContractType(v1_6_0_burn_mint_with_external_minter_token_pool_ops.ContractType), + Version: v1_6_0_burn_mint_with_external_minter_token_pool_ops.Version, + Address: cfg.NewRemotePoolAddress.Hex(), + Qualifier: cfg.RemoteTokenAddress.Hex(), + }); err != nil { + return cldf.ChangesetOutput{}, fmt.Errorf("failed to persist new remote pool ref %s on chain %d: %w", cfg.NewRemotePoolAddress, cfg.RemoteChainSelector, err) + } + } + + return changesets.NewOutputBuilder(e, mcmsRegistry). + WithReports(report.ExecutionReports). + WithBatchOps(report.Output.BatchOps). + WithDataStore(ds). + Build(cfg.MCMS) + } +} + +func resolveRemoteTARAddress(ds cldf_datastore.DataStore, remoteChainSelector uint64) (common.Address, error) { + tarRef := cldf_datastore.AddressRef{ + Type: cldf_datastore.ContractType(tar_ops.ContractType), + ChainSelector: remoteChainSelector, + Version: tar_ops.Version, + } + addr, err := datastore_utils.FindAndFormatRef(ds, tarRef, remoteChainSelector, evm_datastore_utils.ToEVMAddress) + if err != nil { + return common.Address{}, fmt.Errorf("failed to resolve TokenAdminRegistry address on chain %d: %w", remoteChainSelector, err) + } + return addr, nil +} + +func verifyTypeAndVersion( + ds cldf_datastore.DataStore, + chainSelector uint64, + address common.Address, + expectedType cldf_datastore.ContractType, + expectedVersion *semver.Version, + backend bind.ContractBackend, + label string, +) error { + refs := ds.Addresses().Filter( + cldf_datastore.AddressRefByChainSelector(chainSelector), + cldf_datastore.AddressRefByAddress(address.Hex()), + cldf_datastore.AddressRefByType(expectedType), + cldf_datastore.AddressRefByVersion(expectedVersion), + ) + if len(refs) > 0 { + return nil + } + + contractType, version, err := evm_utils.TypeAndVersion(address, backend) + if err != nil { + return fmt.Errorf("failed to read typeAndVersion for %s %s on chain %d: 
%w", label, address, chainSelector, err) + } + actual := fmt.Sprintf("%s %s", contractType, version.String()) + expected := fmt.Sprintf("%s %s", expectedType, expectedVersion.String()) + if actual != expected { + return fmt.Errorf( + "unexpected typeAndVersion %q for %s %s on chain %d, expected %q", + actual, label, address, chainSelector, expected, + ) + } + return nil +} + +func AddressRefExistsWithTypeVersion( + ds cldf_datastore.DataStore, + chainSelector uint64, + address common.Address, + expectedType cldf_datastore.ContractType, + expectedVersion *semver.Version, +) bool { + return len(ds.Addresses().Filter( + cldf_datastore.AddressRefByChainSelector(chainSelector), + cldf_datastore.AddressRefByAddress(address.Hex()), + cldf_datastore.AddressRefByType(expectedType), + cldf_datastore.AddressRefByVersion(expectedVersion), + )) > 0 +} + +func bytesAddressMatches(encoded []byte, expectedAddress common.Address) bool { + paddedExpected := common.LeftPadBytes(expectedAddress.Bytes(), 32) + return bytes.Equal(encoded, paddedExpected) || bytes.Equal(encoded, expectedAddress.Bytes()) +} diff --git a/chains/evm/deployment/v1_6_0/changesets/migrate_hybrid_pool_remote_test.go b/chains/evm/deployment/v1_6_0/changesets/migrate_hybrid_pool_remote_test.go new file mode 100644 index 0000000000..6f427334aa --- /dev/null +++ b/chains/evm/deployment/v1_6_0/changesets/migrate_hybrid_pool_remote_test.go @@ -0,0 +1,561 @@ +package changesets_test + +import ( + "bytes" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + burn_mint_with_external_minter_token_pool_bindings "github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/burn_mint_with_external_minter_token_pool" + hybrid_with_external_minter_token_pool_bindings 
"github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/hybrid_with_external_minter_token_pool" + token_governor_bindings "github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/token_governor" + chainsel "github.com/smartcontractkit/chain-selectors" + tar_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_5_0/operations/token_admin_registry" + v1_6_0_changesets "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/changesets" + v1_6_0_burn_mint_with_external_minter_token_pool_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/burn_mint_with_external_minter_token_pool" + v1_6_0_hybrid_pool_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool" + token_admin_registry_bindings "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_5_0/token_admin_registry" + lock_release_token_pool_bindings "github.com/smartcontractkit/chainlink-ccip/chains/evm/gobindings/generated/v1_5_1/lock_release_token_pool" + core_changesets "github.com/smartcontractkit/chainlink-ccip/deployment/utils/changesets" + "github.com/smartcontractkit/chainlink-ccip/deployment/utils/mcms" + "github.com/smartcontractkit/chainlink-deployments-framework/chain/evm" + cldf_datastore "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + cldf "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/engine/test/environment" + burn_mint_erc20_bindings "github.com/smartcontractkit/chainlink-evm/gethwrappers/shared/generated/initial/burn_mint_erc20" + mcms_types "github.com/smartcontractkit/mcms/types" +) + +type migrateHybridPoolRemoteMockReader struct { + timelockByChain map[uint64]string + mcmByChain map[uint64]string +} + +func (m *migrateHybridPoolRemoteMockReader) 
GetTimelockRef(_ cldf.Environment, chainSelector uint64, _ mcms.Input) (cldf_datastore.AddressRef, error) { + return cldf_datastore.AddressRef{ + ChainSelector: chainSelector, + Address: m.timelockByChain[chainSelector], + Type: "Timelock", + }, nil +} + +func (m *migrateHybridPoolRemoteMockReader) GetMCMSRef(_ cldf.Environment, chainSelector uint64, _ mcms.Input) (cldf_datastore.AddressRef, error) { + return cldf_datastore.AddressRef{ + ChainSelector: chainSelector, + Address: m.mcmByChain[chainSelector], + Type: "MCM", + }, nil +} + +func (m *migrateHybridPoolRemoteMockReader) GetChainMetadata(_ cldf.Environment, chainSelector uint64, _ mcms.Input) (mcms_types.ChainMetadata, error) { + return mcms_types.ChainMetadata{ + StartingOpCount: 1, + MCMAddress: m.mcmByChain[chainSelector], + }, nil +} + +type migrateHybridPoolRemoteFixture struct { + env *cldf.Environment + hubSelector uint64 + remoteSelector uint64 + hubChain evm.Chain + remoteChain evm.Chain + + hubTokenAddress common.Address + hubPoolAddress common.Address + hubPool *hybrid_with_external_minter_token_pool_bindings.HybridWithExternalMinterTokenPool + oldRemotePoolAddress common.Address + newRemotePoolAddress common.Address + remoteTokenAddress common.Address + remoteChainSupply *big.Int + remoteTARAddress common.Address + remoteTAR *token_admin_registry_bindings.TokenAdminRegistry + + reader *migrateHybridPoolRemoteMockReader + registry *core_changesets.MCMSReaderRegistry +} + +func newMigrateHybridPoolRemoteFixture(t *testing.T) *migrateHybridPoolRemoteFixture { + return newMigrateHybridPoolRemoteFixtureWithPreMint(t, big.NewInt(0)) +} + +func newMigrateHybridPoolRemoteFixtureWithPreMint(t *testing.T, remotePreMint *big.Int) *migrateHybridPoolRemoteFixture { + t.Helper() + + hubSelector := chainsel.ETHEREUM_MAINNET.Selector + remoteSelector := chainsel.ETHEREUM_MAINNET_BASE_1.Selector + + e, err := environment.New(t.Context(), + environment.WithEVMSimulated(t, []uint64{hubSelector, remoteSelector}), + 
) + require.NoError(t, err) + + hubChain := e.BlockChains.EVMChains()[hubSelector] + remoteChain := e.BlockChains.EVMChains()[remoteSelector] + + hubTokenAddress, hubTokenGovernorAddress := deployTokenAndGovernor(t, hubChain, "HUB", big.NewInt(0)) + remoteTokenAddress, remoteTokenGovernorAddress := deployTokenAndGovernor(t, remoteChain, "REM", remotePreMint) + + hubPoolAddress, tx, hubPool, err := hybrid_with_external_minter_token_pool_bindings.DeployHybridWithExternalMinterTokenPool( + hubChain.DeployerKey, + hubChain.Client, + hubTokenGovernorAddress, + hubTokenAddress, + 18, + nil, + common.HexToAddress("0x0000000000000000000000000000000000000011"), + common.HexToAddress("0x0000000000000000000000000000000000000022"), + ) + require.NoError(t, err) + _, err = hubChain.Confirm(tx) + require.NoError(t, err) + + oldRemotePoolAddress, tx, _, err := lock_release_token_pool_bindings.DeployLockReleaseTokenPool( + remoteChain.DeployerKey, + remoteChain.Client, + remoteTokenAddress, + 18, + nil, + common.HexToAddress("0x0000000000000000000000000000000000000011"), + true, + common.HexToAddress("0x0000000000000000000000000000000000000022"), + ) + require.NoError(t, err) + _, err = remoteChain.Confirm(tx) + require.NoError(t, err) + + newRemotePoolAddress, tx, _, err := burn_mint_with_external_minter_token_pool_bindings.DeployBurnMintWithExternalMinterTokenPool( + remoteChain.DeployerKey, + remoteChain.Client, + remoteTokenGovernorAddress, + remoteTokenAddress, + 18, + nil, + common.HexToAddress("0x0000000000000000000000000000000000000011"), + common.HexToAddress("0x0000000000000000000000000000000000000022"), + ) + require.NoError(t, err) + _, err = remoteChain.Confirm(tx) + require.NoError(t, err) + + remoteTARAddress, tx, remoteTAR, err := token_admin_registry_bindings.DeployTokenAdminRegistry(remoteChain.DeployerKey, remoteChain.Client) + require.NoError(t, err) + _, err = remoteChain.Confirm(tx) + require.NoError(t, err) + + tx, err = 
remoteTAR.ProposeAdministrator(remoteChain.DeployerKey, remoteTokenAddress, remoteChain.DeployerKey.From)
	require.NoError(t, err)
	_, err = remoteChain.Confirm(tx)
	require.NoError(t, err)
	tx, err = remoteTAR.AcceptAdminRole(remoteChain.DeployerKey, remoteTokenAddress)
	require.NoError(t, err)
	_, err = remoteChain.Confirm(tx)
	require.NoError(t, err)

	// Mock MCMS reader: the deployer keys stand in for the timelocks so
	// ownership checks in VerifyPreconditions pass against this fixture.
	reader := &migrateHybridPoolRemoteMockReader{
		timelockByChain: map[uint64]string{
			hubSelector:    hubChain.DeployerKey.From.Hex(),
			remoteSelector: remoteChain.DeployerKey.From.Hex(),
		},
		mcmByChain: map[uint64]string{
			hubSelector:    "0x00000000000000000000000000000000000000a1",
			remoteSelector: "0x00000000000000000000000000000000000000b2",
		},
	}
	registry := &core_changesets.MCMSReaderRegistry{}
	registry.RegisterMCMSReader(chainsel.FamilyEVM, reader)

	// Register the TAR in the datastore so the changeset can resolve it.
	memDS := cldf_datastore.NewMemoryDataStore()
	require.NoError(t, memDS.Addresses().Add(cldf_datastore.AddressRef{
		ChainSelector: remoteSelector,
		Type:          cldf_datastore.ContractType(tar_ops.ContractType),
		Version:       tar_ops.Version,
		Address:       remoteTARAddress.Hex(),
	}))
	e.DataStore = memDS.Seal()

	return &migrateHybridPoolRemoteFixture{
		env:                  e,
		hubSelector:          hubSelector,
		remoteSelector:       remoteSelector,
		hubChain:             hubChain,
		remoteChain:          remoteChain,
		hubTokenAddress:      hubTokenAddress,
		hubPoolAddress:       hubPoolAddress,
		hubPool:              hubPool,
		oldRemotePoolAddress: oldRemotePoolAddress,
		newRemotePoolAddress: newRemotePoolAddress,
		remoteTokenAddress:   remoteTokenAddress,
		remoteChainSupply:    new(big.Int).Set(remotePreMint),
		remoteTARAddress:     remoteTARAddress,
		remoteTAR:            remoteTAR,
		reader:               reader,
		registry:             registry,
	}
}

// deployTokenAndGovernor deploys a BurnMintERC20 token plus its TokenGovernor
// on the given chain and returns both addresses. The symbol suffix keeps the
// token identifiers unique per chain within a single test run.
func deployTokenAndGovernor(t *testing.T, chain evm.Chain, symbolSuffix string, preMint *big.Int) (common.Address, common.Address) {
	t.Helper()

	// Copy so the caller's big.Int is never mutated by the binding.
	mintAmount := new(big.Int).Set(preMint)
	tokenAddress, tx, _, err := burn_mint_erc20_bindings.DeployBurnMintERC20(
		chain.DeployerKey,
		chain.Client,
		"Token"+symbolSuffix,
		"T"+symbolSuffix,
		18,
		big.NewInt(1_000_000_000_000_000_000),
		mintAmount,
	)
	require.NoError(t, err)
	_, err = chain.Confirm(tx)
	require.NoError(t, err)

	tokenGovernorAddress, tx, _, err := token_governor_bindings.DeployTokenGovernor(
		chain.DeployerKey,
		chain.Client,
		tokenAddress,
		big.NewInt(0),
		chain.DeployerKey.From,
	)
	require.NoError(t, err)
	_, err = chain.Confirm(tx)
	require.NoError(t, err)

	return tokenAddress, tokenGovernorAddress
}

// changeset builds the changeset under test against the supplied MCMS registry.
func (f *migrateHybridPoolRemoteFixture) changeset(registry *core_changesets.MCMSReaderRegistry) cldf.ChangeSetV2[v1_6_0_changesets.MigrateHybridPoolRemoteConfig] {
	return v1_6_0_changesets.MigrateHybridPoolRemote(registry)
}

// validConfig returns a config that passes VerifyPreconditions for this
// fixture, parameterized only by the hybrid pool target group.
func (f *migrateHybridPoolRemoteFixture) validConfig(targetGroup uint8) v1_6_0_changesets.MigrateHybridPoolRemoteConfig {
	return v1_6_0_changesets.MigrateHybridPoolRemoteConfig{
		HubChainSelector:     f.hubSelector,
		HubPoolAddress:       f.hubPoolAddress,
		RemoteChainSelector:  f.remoteSelector,
		NewRemotePoolAddress: f.newRemotePoolAddress,
		OldRemotePoolAddress: f.oldRemotePoolAddress,
		TargetGroup:          targetGroup,
		RemoteTokenAddress:   f.remoteTokenAddress,
		MCMS: mcms.Input{
			TimelockAction: mcms_types.TimelockActionSchedule,
			ValidUntil:     uint32(time.Now().UTC().Add(24 * time.Hour).Unix()),
			TimelockDelay:  mcms_types.MustParseDuration("1h"),
			Description:    "migrate token pool",
		},
	}
}

// addHubRemotePool registers pool as a remote pool for the fixture's remote
// selector on the hub pool, creating the chain config first if the selector
// is not yet supported.
func (f *migrateHybridPoolRemoteFixture) addHubRemotePool(t *testing.T, pool common.Address) {
	t.Helper()

	chainSupported, err := f.hubPool.IsSupportedChain(&bind.CallOpts{Context: t.Context()}, f.remoteSelector)
	require.NoError(t, err)
	if !chainSupported {
		// First registration: create the full chain update with disabled
		// rate limiters, then we are done.
		tx, err := f.hubPool.ApplyChainUpdates(
			f.hubChain.DeployerKey,
			nil,
			[]hybrid_with_external_minter_token_pool_bindings.TokenPoolChainUpdate{
				{
					RemoteChainSelector: f.remoteSelector,
					RemotePoolAddresses: [][]byte{common.LeftPadBytes(pool.Bytes(), 32)},
					RemoteTokenAddress:  common.LeftPadBytes(f.remoteTokenAddress.Bytes(), 32),
					OutboundRateLimiterConfig: hybrid_with_external_minter_token_pool_bindings.RateLimiterConfig{
						IsEnabled: false,
						Capacity:  big.NewInt(0),
						Rate:      big.NewInt(0),
					},
					InboundRateLimiterConfig: hybrid_with_external_minter_token_pool_bindings.RateLimiterConfig{
						IsEnabled: false,
						Capacity:  big.NewInt(0),
						Rate:      big.NewInt(0),
					},
				},
			},
		)
		require.NoError(t, err)
		_, err = f.hubChain.Confirm(tx)
		require.NoError(t, err)
		return
	}

	// Chain already configured: just append the additional remote pool.
	tx, err := f.hubPool.AddRemotePool(
		f.hubChain.DeployerKey,
		f.remoteSelector,
		common.LeftPadBytes(pool.Bytes(), 32),
	)
	require.NoError(t, err)
	_, err = f.hubChain.Confirm(tx)
	require.NoError(t, err)
}

// setTARPool points the remote TokenAdminRegistry entry for the fixture token
// at the given pool.
func (f *migrateHybridPoolRemoteFixture) setTARPool(t *testing.T, pool common.Address) {
	t.Helper()
	tx, err := f.remoteTAR.SetPool(f.remoteChain.DeployerKey, f.remoteTokenAddress, pool)
	require.NoError(t, err)
	_, err = f.remoteChain.Confirm(tx)
	require.NoError(t, err)
}

// A fully consistent on-chain state must verify cleanly.
func TestMigrateHybridPoolRemote_VerifyPreconditions_Valid(t *testing.T) {
	fixture := newMigrateHybridPoolRemoteFixture(t)
	fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress)
	fixture.setTARPool(t, fixture.oldRemotePoolAddress)

	cs := fixture.changeset(fixture.registry)
	require.NoError(t, cs.VerifyPreconditions(*fixture.env, fixture.validConfig(0)))
}

// Pure input-validation failures, independent of on-chain state.
func TestMigrateHybridPoolRemote_VerifyPreconditions_InvalidInputs(t *testing.T) {
	fixture := newMigrateHybridPoolRemoteFixture(t)
	fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress)
	fixture.setTARPool(t, fixture.oldRemotePoolAddress)
	cs := fixture.changeset(fixture.registry)

	t.Run("old equals new pool address", func(t *testing.T) {
		cfg := fixture.validConfig(0)
		cfg.OldRemotePoolAddress = cfg.NewRemotePoolAddress
		err := cs.VerifyPreconditions(*fixture.env, cfg)
		require.ErrorContains(t, err, "must be different")
	})

	t.Run("invalid target group", func(t *testing.T) {
		cfg := fixture.validConfig(5)
		err := cs.VerifyPreconditions(*fixture.env, cfg)
		require.ErrorContains(t, err, "target group must be 0 or 1")
	})

	t.Run("same hub and remote selector", func(t *testing.T) {
		cfg := fixture.validConfig(0)
		cfg.RemoteChainSelector = cfg.HubChainSelector
		err := cs.VerifyPreconditions(*fixture.env, cfg)
		require.ErrorContains(t, err, "must be different")
	})

	t.Run("zero hub pool address", func(t *testing.T) {
		cfg := fixture.validConfig(0)
		cfg.HubPoolAddress = common.Address{}
		err := cs.VerifyPreconditions(*fixture.env, cfg)
		require.ErrorContains(t, err, "hub pool address cannot be the zero address")
	})
}

// Failures that require reading (inconsistent) on-chain state.
func TestMigrateHybridPoolRemote_VerifyPreconditions_OnChainStateChecks(t *testing.T) {
	tests := []struct {
		name         string
		mutateConfig func(*v1_6_0_changesets.MigrateHybridPoolRemoteConfig, *migrateHybridPoolRemoteFixture)
		expectedErr  string
	}{
		{
			name: "hub pool type and version mismatch",
			mutateConfig: func(cfg *v1_6_0_changesets.MigrateHybridPoolRemoteConfig, f *migrateHybridPoolRemoteFixture) {
				// Point the config at a contract that is not a pool at all.
				cfg.HubPoolAddress = f.hubTokenAddress
			},
			expectedErr: "failed to read typeAndVersion for hub pool",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			fixture := newMigrateHybridPoolRemoteFixture(t)
			fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress)
			fixture.setTARPool(t, fixture.oldRemotePoolAddress)

			cs := fixture.changeset(fixture.registry)
			cfg := fixture.validConfig(0)
			tc.mutateConfig(&cfg, fixture)
			err := cs.VerifyPreconditions(*fixture.env, cfg)
			require.ErrorContains(t, err, tc.expectedErr)
		})
	}
}

// Failures surfaced by the MCMS reader: missing refs and timelock mismatches.
func TestMigrateHybridPoolRemote_VerifyPreconditions_MCMSReaderErrors(t *testing.T) {
	tests := []struct {
		name         string
		makeReader   func(*migrateHybridPoolRemoteFixture) *migrateHybridPoolRemoteMockReader
		expectedErrs []string
	}{
		{
			name: "missing remote MCMS ref",
			makeReader: func(f *migrateHybridPoolRemoteFixture) *migrateHybridPoolRemoteMockReader {
				return &migrateHybridPoolRemoteMockReader{
					timelockByChain: f.reader.timelockByChain,
					mcmByChain: map[uint64]string{
						f.hubSelector: f.reader.mcmByChain[f.hubSelector],
					},
				}
			},
			expectedErrs: []string{"missing MCMS for remote chain"},
		},
		{
			name: "hub owner mismatch",
			makeReader: func(f *migrateHybridPoolRemoteFixture) *migrateHybridPoolRemoteMockReader {
				return &migrateHybridPoolRemoteMockReader{
					timelockByChain: map[uint64]string{
						f.hubSelector:    common.HexToAddress("0x0000000000000000000000000000000000000c01").Hex(),
						f.remoteSelector: f.reader.timelockByChain[f.remoteSelector],
					},
					mcmByChain: f.reader.mcmByChain,
				}
			},
			expectedErrs: []string{"owner", "does not match timelock"},
		},
		{
			name: "TAR admin mismatch",
			makeReader: func(f *migrateHybridPoolRemoteFixture) *migrateHybridPoolRemoteMockReader {
				return &migrateHybridPoolRemoteMockReader{
					timelockByChain: map[uint64]string{
						f.hubSelector:    f.reader.timelockByChain[f.hubSelector],
						f.remoteSelector: common.HexToAddress("0x0000000000000000000000000000000000000d01").Hex(),
					},
					mcmByChain: f.reader.mcmByChain,
				}
			},
			expectedErrs: []string{"TAR administrator", "does not match timelock"},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			fixture := newMigrateHybridPoolRemoteFixture(t)
			fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress)
			fixture.setTARPool(t, fixture.oldRemotePoolAddress)

			badReader := tc.makeReader(fixture)
			badRegistry := &core_changesets.MCMSReaderRegistry{}
			badRegistry.RegisterMCMSReader(chainsel.FamilyEVM, badReader)

			cs := fixture.changeset(badRegistry)
			err := cs.VerifyPreconditions(*fixture.env, fixture.validConfig(0))
			for _, expected := range tc.expectedErrs {
				require.ErrorContains(t, err, expected)
			}
		})
	}
}

func
TestMigrateHybridPoolRemote_VerifyPreconditions_UnsupportedRemoteChainOnHub(t *testing.T) { + fixture := newMigrateHybridPoolRemoteFixture(t) + fixture.setTARPool(t, fixture.oldRemotePoolAddress) + + cs := fixture.changeset(fixture.registry) + err := cs.VerifyPreconditions(*fixture.env, fixture.validConfig(0)) + require.ErrorContains(t, err, "is not supported on hub pool") +} + +func TestMigrateHybridPoolRemote_VerifyPreconditions_UnexpectedHubRemotePool(t *testing.T) { + fixture := newMigrateHybridPoolRemoteFixture(t) + fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress) + fixture.setTARPool(t, fixture.oldRemotePoolAddress) + + tx, err := fixture.hubPool.AddRemotePool( + fixture.hubChain.DeployerKey, + fixture.remoteSelector, + common.LeftPadBytes(common.HexToAddress("0x0000000000000000000000000000000000000abc").Bytes(), 32), + ) + require.NoError(t, err) + _, err = fixture.hubChain.Confirm(tx) + require.NoError(t, err) + + cs := fixture.changeset(fixture.registry) + err = cs.VerifyPreconditions(*fixture.env, fixture.validConfig(0)) + require.ErrorContains(t, err, "unexpected pool") +} + +func TestMigrateHybridPoolRemote_VerifyPreconditions_LockedTokensBound(t *testing.T) { + fixture := newMigrateHybridPoolRemoteFixtureWithPreMint(t, big.NewInt(100)) + fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress) + fixture.setTARPool(t, fixture.oldRemotePoolAddress) + + cs := fixture.changeset(fixture.registry) + err := cs.VerifyPreconditions(*fixture.env, fixture.validConfig(1)) + require.ErrorContains(t, err, "exceeds locked token accounting") +} + +func TestMigrateHybridPoolRemote_Apply_FreshState(t *testing.T) { + fixture := newMigrateHybridPoolRemoteFixture(t) + fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress) + fixture.setTARPool(t, fixture.oldRemotePoolAddress) + + cs := fixture.changeset(fixture.registry) + cfg := fixture.validConfig(1) + require.NoError(t, cs.VerifyPreconditions(*fixture.env, cfg)) + + out, err := cs.Apply(*fixture.env, cfg) + 
require.NoError(t, err) + require.NotNil(t, out.DataStore) + + sealedDS := out.DataStore.Seal() + require.True(t, v1_6_0_changesets.AddressRefExistsWithTypeVersion( + sealedDS, + fixture.hubSelector, + fixture.hubPoolAddress, + cldf_datastore.ContractType(v1_6_0_hybrid_pool_ops.ContractType), + v1_6_0_hybrid_pool_ops.Version, + )) + require.True(t, v1_6_0_changesets.AddressRefExistsWithTypeVersion( + sealedDS, + fixture.remoteSelector, + fixture.newRemotePoolAddress, + cldf_datastore.ContractType(v1_6_0_burn_mint_with_external_minter_token_pool_ops.ContractType), + v1_6_0_burn_mint_with_external_minter_token_pool_ops.Version, + )) +} + +func TestMigrateHybridPoolRemote_Apply_PartialState(t *testing.T) { + fixture := newMigrateHybridPoolRemoteFixture(t) + fixture.addHubRemotePool(t, fixture.oldRemotePoolAddress) + fixture.addHubRemotePool(t, fixture.newRemotePoolAddress) + fixture.setTARPool(t, fixture.oldRemotePoolAddress) + + cs := fixture.changeset(fixture.registry) + cfg := fixture.validConfig(1) + require.NoError(t, cs.VerifyPreconditions(*fixture.env, cfg)) + + out, err := cs.Apply(*fixture.env, cfg) + require.NoError(t, err) + require.NotNil(t, out.DataStore) +} + +func TestMigrateHybridPoolRemote_Apply_AlreadyComplete_NoProposal(t *testing.T) { + fixture := newMigrateHybridPoolRemoteFixture(t) + fixture.addHubRemotePool(t, fixture.newRemotePoolAddress) + fixture.setTARPool(t, fixture.newRemotePoolAddress) + + cs := fixture.changeset(fixture.registry) + cfg := fixture.validConfig(0) + require.NoError(t, cs.VerifyPreconditions(*fixture.env, cfg)) + + out, err := cs.Apply(*fixture.env, cfg) + require.NoError(t, err) + require.Len(t, out.MCMSTimelockProposals, 0) + + group, err := fixture.hubPool.GetGroup(&bind.CallOpts{Context: t.Context()}, fixture.remoteSelector) + require.NoError(t, err) + require.Equal(t, uint8(0), group) + + remotePools, err := fixture.hubPool.GetRemotePools(&bind.CallOpts{Context: t.Context()}, fixture.remoteSelector) + require.NoError(t, 
err) + require.True(t, containsPoolBytes(remotePools, common.LeftPadBytes(fixture.newRemotePoolAddress.Bytes(), 32))) + require.False(t, containsPoolBytes(remotePools, common.LeftPadBytes(fixture.oldRemotePoolAddress.Bytes(), 32))) + + tarCfg, err := fixture.remoteTAR.GetTokenConfig(&bind.CallOpts{Context: t.Context()}, fixture.remoteTokenAddress) + require.NoError(t, err) + require.Equal(t, fixture.newRemotePoolAddress, tarCfg.TokenPool) +} + +func containsPoolBytes(pools [][]byte, target []byte) bool { + for _, pool := range pools { + if bytes.Equal(pool, target) { + return true + } + } + return false +} diff --git a/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool/hybrid_with_external_minter_token_pool.go b/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool/hybrid_with_external_minter_token_pool.go index d5a3882412..840bde75ab 100644 --- a/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool/hybrid_with_external_minter_token_pool.go +++ b/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool/hybrid_with_external_minter_token_pool.go @@ -1,7 +1,11 @@ package hybrid_with_external_minter_token_pool import ( + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/hybrid_with_external_minter_token_pool" "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/utils/operations/contract" @@ -23,6 +27,20 @@ type ConstructorArgs struct { Router common.Address // The router address } +type AddRemotePoolArgs struct { + RemoteChainSelector uint64 + RemotePoolAddress []byte +} + +type RemoveRemotePoolArgs struct { + RemoteChainSelector uint64 + RemotePoolAddress []byte +} + +type UpdateGroupsArgs struct { + GroupUpdates 
[]hybrid_with_external_minter_token_pool.HybridTokenPoolAbstractGroupUpdate +} + var Deploy = contract.NewDeploy(contract.DeployParams[ConstructorArgs]{ Name: "hybrid_with_external_minter_token_pool:deploy", Version: Version, @@ -35,3 +53,78 @@ var Deploy = contract.NewDeploy(contract.DeployParams[ConstructorArgs]{ }, Validate: func(args ConstructorArgs) error { return nil }, }) + +var GetGroup = contract.NewRead(contract.ReadParams[uint64, uint8, *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool]{ + Name: "hybrid_with_external_minter_token_pool:get-group", + Version: Version, + Description: "Gets the group assigned to a remote chain selector on HybridWithExternalMinterTokenPool", + ContractType: ContractType, + NewContract: hybrid_with_external_minter_token_pool.NewHybridWithExternalMinterTokenPool, + CallContract: func(c *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, opts *bind.CallOpts, remoteChainSelector uint64) (uint8, error) { + return c.GetGroup(opts, remoteChainSelector) + }, +}) + +var GetRemotePools = contract.NewRead(contract.ReadParams[uint64, [][]byte, *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool]{ + Name: "hybrid_with_external_minter_token_pool:get-remote-pools", + Version: Version, + Description: "Gets the registered remote pool addresses for a chain selector on HybridWithExternalMinterTokenPool", + ContractType: ContractType, + NewContract: hybrid_with_external_minter_token_pool.NewHybridWithExternalMinterTokenPool, + CallContract: func(c *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, opts *bind.CallOpts, remoteChainSelector uint64) ([][]byte, error) { + return c.GetRemotePools(opts, remoteChainSelector) + }, +}) + +var GetLockedTokens = contract.NewRead(contract.ReadParams[struct{}, *big.Int, *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool]{ + Name: "hybrid_with_external_minter_token_pool:get-locked-tokens", + 
Version: Version, + Description: "Gets total locked token accounting from HybridWithExternalMinterTokenPool", + ContractType: ContractType, + NewContract: hybrid_with_external_minter_token_pool.NewHybridWithExternalMinterTokenPool, + CallContract: func(c *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, opts *bind.CallOpts, _ struct{}) (*big.Int, error) { + return c.GetLockedTokens(opts) + }, +}) + +var AddRemotePool = contract.NewWrite(contract.WriteParams[AddRemotePoolArgs, *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool]{ + Name: "hybrid_with_external_minter_token_pool:add-remote-pool", + Version: Version, + Description: "Adds a remote pool for a chain selector on HybridWithExternalMinterTokenPool", + ContractType: ContractType, + ContractABI: hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPoolABI, + NewContract: hybrid_with_external_minter_token_pool.NewHybridWithExternalMinterTokenPool, + IsAllowedCaller: contract.OnlyOwner[*hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, AddRemotePoolArgs], + Validate: func(AddRemotePoolArgs) error { return nil }, + CallContract: func(c *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, opts *bind.TransactOpts, args AddRemotePoolArgs) (*types.Transaction, error) { + return c.AddRemotePool(opts, args.RemoteChainSelector, args.RemotePoolAddress) + }, +}) + +var RemoveRemotePool = contract.NewWrite(contract.WriteParams[RemoveRemotePoolArgs, *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool]{ + Name: "hybrid_with_external_minter_token_pool:remove-remote-pool", + Version: Version, + Description: "Removes a remote pool for a chain selector on HybridWithExternalMinterTokenPool", + ContractType: ContractType, + ContractABI: hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPoolABI, + NewContract: hybrid_with_external_minter_token_pool.NewHybridWithExternalMinterTokenPool, 
+ IsAllowedCaller: contract.OnlyOwner[*hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, RemoveRemotePoolArgs], + Validate: func(RemoveRemotePoolArgs) error { return nil }, + CallContract: func(c *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, opts *bind.TransactOpts, args RemoveRemotePoolArgs) (*types.Transaction, error) { + return c.RemoveRemotePool(opts, args.RemoteChainSelector, args.RemotePoolAddress) + }, +}) + +var UpdateGroups = contract.NewWrite(contract.WriteParams[UpdateGroupsArgs, *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool]{ + Name: "hybrid_with_external_minter_token_pool:update-groups", + Version: Version, + Description: "Updates remote chain groups on HybridWithExternalMinterTokenPool", + ContractType: ContractType, + ContractABI: hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPoolABI, + NewContract: hybrid_with_external_minter_token_pool.NewHybridWithExternalMinterTokenPool, + IsAllowedCaller: contract.OnlyOwner[*hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, UpdateGroupsArgs], + Validate: func(UpdateGroupsArgs) error { return nil }, + CallContract: func(c *hybrid_with_external_minter_token_pool.HybridWithExternalMinterTokenPool, opts *bind.TransactOpts, args UpdateGroupsArgs) (*types.Transaction, error) { + return c.UpdateGroups(opts, args.GroupUpdates) + }, +}) diff --git a/chains/evm/deployment/v1_6_0/sequences/migrate_hybrid_pool_remote.go b/chains/evm/deployment/v1_6_0/sequences/migrate_hybrid_pool_remote.go new file mode 100644 index 0000000000..c17d35671a --- /dev/null +++ b/chains/evm/deployment/v1_6_0/sequences/migrate_hybrid_pool_remote.go @@ -0,0 +1,214 @@ +package sequences + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "slices" + + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/common" + cldf_chain "github.com/smartcontractkit/chainlink-deployments-framework/chain" + cldf_ops 
"github.com/smartcontractkit/chainlink-deployments-framework/operations" + mcms_types "github.com/smartcontractkit/mcms/types" + + hybrid_pool_binding "github.com/smartcontractkit/ccip-contract-examples/chains/evm/gobindings/generated/latest/hybrid_with_external_minter_token_pool" + evm_contract "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/utils/operations/contract" + tar_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_5_0/operations/token_admin_registry" + hybrid_pool_ops "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/hybrid_with_external_minter_token_pool" + "github.com/smartcontractkit/chainlink-ccip/deployment/utils/sequences" +) + +type MigrateHybridPoolRemoteInput struct { + HubChainSelector uint64 + HubPoolAddress common.Address + RemoteChainSelector uint64 + NewRemotePoolAddress common.Address + OldRemotePoolAddress common.Address + RemoteChainSupply *big.Int + TargetGroup uint8 + RemoteTARAddress common.Address + RemoteTokenAddress common.Address +} + +var MigrateHybridPoolRemote = cldf_ops.NewSequence( + "migrate-hybrid-pool-remote", + semver.MustParse("1.6.0"), + "Migrates a remote chain token pool from lock-release to burn-mint on a hybrid hub pool", + func(b cldf_ops.Bundle, chains cldf_chain.BlockChains, input MigrateHybridPoolRemoteInput) (sequences.OnChainOutput, error) { + if input.RemoteChainSupply == nil { + return sequences.OnChainOutput{}, errors.New("RemoteChainSupply must not be nil") + } + + hubChain, ok := chains.EVMChains()[input.HubChainSelector] + if !ok { + return sequences.OnChainOutput{}, fmt.Errorf("hub chain with selector %d not defined", input.HubChainSelector) + } + remoteChain, ok := chains.EVMChains()[input.RemoteChainSelector] + if !ok { + return sequences.OnChainOutput{}, fmt.Errorf("remote chain with selector %d not defined", input.RemoteChainSelector) + } + + oldPoolBytes := common.LeftPadBytes(input.OldRemotePoolAddress.Bytes(), 32) + 
newPoolBytes := common.LeftPadBytes(input.NewRemotePoolAddress.Bytes(), 32) + + hubWrites := make([]evm_contract.WriteOutput, 0, 3) + + remotePoolsReport, err := cldf_ops.ExecuteOperation(b, hybrid_pool_ops.GetRemotePools, hubChain, evm_contract.FunctionInput[uint64]{ + ChainSelector: input.HubChainSelector, + Address: input.HubPoolAddress, + Args: input.RemoteChainSelector, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to read hub remote pools for remote chain %d: %w", input.RemoteChainSelector, err) + } + for _, pool := range remotePoolsReport.Output { + if !bytes.Equal(pool, oldPoolBytes) && !bytes.Equal(pool, newPoolBytes) { + return sequences.OnChainOutput{}, fmt.Errorf( + "unexpected pool %x in remote pool set for chain %d", + pool, + input.RemoteChainSelector, + ) + } + } + + oldPresent := containsBytes(remotePoolsReport.Output, oldPoolBytes) + newPresent := containsBytes(remotePoolsReport.Output, newPoolBytes) + if !oldPresent && !newPresent { + return sequences.OnChainOutput{}, fmt.Errorf( + "neither old pool %s nor new pool %s registered for chain %d", + input.OldRemotePoolAddress, + input.NewRemotePoolAddress, + input.RemoteChainSelector, + ) + } + + if !newPresent { + addReport, err := cldf_ops.ExecuteOperation(b, hybrid_pool_ops.AddRemotePool, hubChain, evm_contract.FunctionInput[hybrid_pool_ops.AddRemotePoolArgs]{ + ChainSelector: input.HubChainSelector, + Address: input.HubPoolAddress, + Args: hybrid_pool_ops.AddRemotePoolArgs{ + RemoteChainSelector: input.RemoteChainSelector, + RemotePoolAddress: newPoolBytes, + }, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to add new remote pool %s on hub chain %d: %w", input.NewRemotePoolAddress, input.HubChainSelector, err) + } + hubWrites = append(hubWrites, addReport.Output) + } + + if oldPresent { + removeReport, err := cldf_ops.ExecuteOperation(b, hybrid_pool_ops.RemoveRemotePool, hubChain, 
evm_contract.FunctionInput[hybrid_pool_ops.RemoveRemotePoolArgs]{ + ChainSelector: input.HubChainSelector, + Address: input.HubPoolAddress, + Args: hybrid_pool_ops.RemoveRemotePoolArgs{ + RemoteChainSelector: input.RemoteChainSelector, + RemotePoolAddress: oldPoolBytes, + }, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to remove old remote pool %s on hub chain %d: %w", input.OldRemotePoolAddress, input.HubChainSelector, err) + } + hubWrites = append(hubWrites, removeReport.Output) + } + + groupReport, err := cldf_ops.ExecuteOperation(b, hybrid_pool_ops.GetGroup, hubChain, evm_contract.FunctionInput[uint64]{ + ChainSelector: input.HubChainSelector, + Address: input.HubPoolAddress, + Args: input.RemoteChainSelector, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to read hub group for remote chain %d: %w", input.RemoteChainSelector, err) + } + if groupReport.Output != input.TargetGroup { + updateReport, err := cldf_ops.ExecuteOperation(b, hybrid_pool_ops.UpdateGroups, hubChain, evm_contract.FunctionInput[hybrid_pool_ops.UpdateGroupsArgs]{ + ChainSelector: input.HubChainSelector, + Address: input.HubPoolAddress, + Args: hybrid_pool_ops.UpdateGroupsArgs{ + GroupUpdates: []hybrid_pool_binding.HybridTokenPoolAbstractGroupUpdate{ + { + RemoteChainSelector: input.RemoteChainSelector, + Group: input.TargetGroup, + RemoteChainSupply: input.RemoteChainSupply, + }, + }, + }, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to update group for remote chain %d on hub chain %d: %w", input.RemoteChainSelector, input.HubChainSelector, err) + } + hubWrites = append(hubWrites, updateReport.Output) + } + + remoteWrites := make([]evm_contract.WriteOutput, 0, 1) + + tarConfigReport, err := cldf_ops.ExecuteOperation(b, tar_ops.GetTokenConfig, remoteChain, evm_contract.FunctionInput[common.Address]{ + ChainSelector: input.RemoteChainSelector, + Address: input.RemoteTARAddress, + Args: 
input.RemoteTokenAddress, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to read token config from TAR %s on chain %d: %w", input.RemoteTARAddress, input.RemoteChainSelector, err) + } + + currentPool := tarConfigReport.Output.TokenPool + if currentPool == (common.Address{}) { + return sequences.OnChainOutput{}, fmt.Errorf("token %s has no pool set in TAR on chain %d", input.RemoteTokenAddress, input.RemoteChainSelector) + } + if currentPool != input.NewRemotePoolAddress { + if currentPool != input.OldRemotePoolAddress { + return sequences.OnChainOutput{}, fmt.Errorf( + "TAR pool %s is neither old %s nor new %s for token %s on chain %d", + currentPool, + input.OldRemotePoolAddress, + input.NewRemotePoolAddress, + input.RemoteTokenAddress, + input.RemoteChainSelector, + ) + } + + setPoolReport, err := cldf_ops.ExecuteOperation(b, tar_ops.SetPool, remoteChain, evm_contract.FunctionInput[tar_ops.SetPoolArgs]{ + ChainSelector: input.RemoteChainSelector, + Address: input.RemoteTARAddress, + Args: tar_ops.SetPoolArgs{ + TokenAddress: input.RemoteTokenAddress, + TokenPoolAddress: input.NewRemotePoolAddress, + }, + }) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to set TAR pool for token %s on chain %d: %w", input.RemoteTokenAddress, input.RemoteChainSelector, err) + } + remoteWrites = append(remoteWrites, setPoolReport.Output) + } + + batchOps := make([]mcms_types.BatchOperation, 0, 2) + if len(hubWrites) > 0 { + hubBatch, err := evm_contract.NewBatchOperationFromWrites(hubWrites) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to build hub chain batch operation: %w", err) + } + if len(hubBatch.Transactions) > 0 { + batchOps = append(batchOps, hubBatch) + } + } + if len(remoteWrites) > 0 { + remoteBatch, err := evm_contract.NewBatchOperationFromWrites(remoteWrites) + if err != nil { + return sequences.OnChainOutput{}, fmt.Errorf("failed to build remote chain batch operation: %w", err) + } + 
if len(remoteBatch.Transactions) > 0 { + batchOps = append(batchOps, remoteBatch) + } + } + + return sequences.OnChainOutput{BatchOps: batchOps}, nil + }, +) + +func containsBytes(haystack [][]byte, needle []byte) bool { + return slices.ContainsFunc(haystack, func(candidate []byte) bool { + return bytes.Equal(candidate, needle) + }) +}