lang: Add support for multiple test suites with separate local validators (#1681)

Paul 2022-03-30 17:17:54 -04:00 committed by GitHub
parent 7803acb995
commit 470e902f48
31 changed files with 983 additions and 144 deletions

View File

@ -381,6 +381,8 @@ jobs:
path: tests/validator-clone
- cmd: cd tests/cpi-returns && anchor test --skip-lint
path: tests/cpi-returns
- cmd: cd tests/multiple-suites && anchor test --skip-lint
path: tests/multiple-suites
steps:
- uses: actions/checkout@v2
- uses: ./.github/actions/setup/

.gitignore (2 changed lines)
View File

@ -12,6 +12,8 @@ examples/*/Cargo.lock
examples/**/Cargo.lock
tests/*/Cargo.lock
tests/**/Cargo.lock
tests/*/yarn.lock
tests/**/yarn.lock
.DS_Store
docs/yarn.lock
ts/docs/

View File

@ -12,6 +12,7 @@ The minor version will be incremented upon a breaking change and the patch versi
### Features
* lang: Add support for multiple test suites with separate local validators ([#1681](https://github.com/project-serum/anchor/pull/1681)).
* lang: Add return values to CPI client. ([#1598](https://github.com/project-serum/anchor/pull/1598)).
* avm: New `avm update` command to update the Anchor CLI to the latest version ([#1670](https://github.com/project-serum/anchor/pull/1670)).

View File

@ -39,6 +39,6 @@ reqwest = { version = "0.11.4", features = ["multipart", "blocking"] }
tokio = "1.0"
pathdiff = "0.2.0"
cargo_toml = "0.9.2"
walkdir = "2"
walkdir = "2.3.2"
chrono = "0.4.19"
portpicker = "0.1.1"

View File

@ -1,3 +1,4 @@
use crate::is_hidden;
use anchor_client::Cluster;
use anchor_syn::idl::Idl;
use anyhow::{anyhow, Context, Error, Result};
@ -7,7 +8,7 @@ use serde::{Deserialize, Serialize};
use solana_cli_config::{Config as SolanaConfig, CONFIG_FILE};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashMap};
use std::convert::TryFrom;
use std::fs::{self, File};
use std::io;
@ -16,6 +17,11 @@ use std::ops::Deref;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
use walkdir::WalkDir;
pub trait Merge: Sized {
fn merge(&mut self, _other: Self) {}
}
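// Illustrative sketch (not part of this commit's diff): Merge is the hook the
// Test.toml machinery below uses to layer a suite-specific file over the
// file(s) it `extends`; fields present in `other` win over fields already set
// on `self`.
//
//     let mut effective = base_toml;   // e.g. parsed from a Test.base.toml
//     effective.merge(suite_toml);     // suite-level values override the base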
#[derive(Default, Debug, Parser)]
pub struct ConfigOverride {
@ -27,6 +33,7 @@ pub struct ConfigOverride {
pub wallet: Option<WalletPath>,
}
#[derive(Debug)]
pub struct WithPath<T> {
inner: T,
path: PathBuf,
@ -267,7 +274,11 @@ pub struct Config {
pub programs: ProgramsConfig,
pub scripts: ScriptsConfig,
pub workspace: WorkspaceConfig,
pub test: Option<Test>,
// Separate entry next to test_config because
// "anchor localnet" only has access to the Anchor.toml,
// not the Test.toml files
pub test_validator: Option<TestValidator>,
pub test_config: Option<TestConfig>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
@ -324,6 +335,11 @@ pub struct BuildConfig {
}
impl Config {
fn with_test_config(mut self, p: impl AsRef<Path>) -> Result<Self> {
self.test_config = TestConfig::discover(p)?;
Ok(self)
}
pub fn docker(&self) -> String {
let ver = self
.anchor_version
@ -377,7 +393,8 @@ impl Config {
fn from_path(p: impl AsRef<Path>) -> Result<Self> {
fs::read_to_string(&p)
.with_context(|| format!("Error reading the file with path: {}", p.as_ref().display()))?
.parse()
.parse::<Self>()?
.with_test_config(p.as_ref().parent().unwrap())
}
pub fn wallet_kp(&self) -> Result<Keypair> {
@ -396,7 +413,7 @@ struct _Config {
provider: Provider,
workspace: Option<WorkspaceConfig>,
scripts: Option<ScriptsConfig>,
test: Option<Test>,
test: Option<_TestValidator>,
}
#[derive(Debug, Serialize, Deserialize)]
@ -424,7 +441,7 @@ impl ToString for Config {
cluster: format!("{}", self.provider.cluster),
wallet: self.provider.wallet.to_string(),
},
test: self.test.clone(),
test: self.test_validator.clone().map(Into::into),
scripts: match self.scripts.is_empty() {
true => None,
false => Some(self.scripts.clone()),
@ -454,7 +471,8 @@ impl FromStr for Config {
wallet: shellexpand::tilde(&cfg.provider.wallet).parse()?,
},
scripts: cfg.scripts.unwrap_or_default(),
test: cfg.test,
test_validator: cfg.test.map(Into::into),
test_config: None,
programs: cfg.programs.map_or(Ok(BTreeMap::new()), deser_programs)?,
workspace: cfg.workspace.unwrap_or_default(),
})
@ -531,11 +549,235 @@ fn deser_programs(
.collect::<Result<BTreeMap<Cluster, BTreeMap<String, ProgramDeployment>>>>()
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Test {
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct TestValidator {
pub genesis: Option<Vec<GenesisEntry>>,
pub validator: Option<Validator>,
pub startup_wait: i32,
pub shutdown_wait: i32,
}
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct _TestValidator {
#[serde(skip_serializing_if = "Option::is_none")]
pub genesis: Option<Vec<GenesisEntry>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub validator: Option<_Validator>,
#[serde(skip_serializing_if = "Option::is_none")]
pub startup_wait: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub shutdown_wait: Option<i32>,
}
pub const STARTUP_WAIT: i32 = 5000;
pub const SHUTDOWN_WAIT: i32 = 2000;
impl From<_TestValidator> for TestValidator {
fn from(_test_validator: _TestValidator) -> Self {
Self {
shutdown_wait: _test_validator.shutdown_wait.unwrap_or(SHUTDOWN_WAIT),
startup_wait: _test_validator.startup_wait.unwrap_or(STARTUP_WAIT),
genesis: _test_validator.genesis,
validator: _test_validator.validator.map(Into::into),
}
}
}
impl From<TestValidator> for _TestValidator {
fn from(test_validator: TestValidator) -> Self {
Self {
shutdown_wait: Some(test_validator.shutdown_wait),
startup_wait: Some(test_validator.startup_wait),
genesis: test_validator.genesis,
validator: test_validator.validator.map(Into::into),
}
}
}
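// Summary of the pattern used throughout this file (added for clarity, not part
// of the original commit): the `_`-prefixed types (_TestToml, _TestValidator,
// _Validator) mirror the raw TOML with every field optional so partial files
// can be deserialized and merged, while the unprefixed types are the resolved
// forms with defaults (STARTUP_WAIT, SHUTDOWN_WAIT, the default RPC and faucet
// ports, ledger path and bind address) filled in via these From conversions.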
#[derive(Debug, Clone)]
pub struct TestConfig {
pub test_suite_configs: HashMap<PathBuf, TestToml>,
}
impl Deref for TestConfig {
type Target = HashMap<PathBuf, TestToml>;
fn deref(&self) -> &Self::Target {
&self.test_suite_configs
}
}
impl TestConfig {
pub fn discover(root: impl AsRef<Path>) -> Result<Option<Self>> {
let walker = WalkDir::new(root).into_iter();
let mut test_suite_configs = HashMap::new();
for entry in walker.filter_entry(|e| !is_hidden(e)) {
let entry = entry?;
if entry.file_name() == "Test.toml" {
let test_toml = TestToml::from_path(entry.path())?;
test_suite_configs.insert(entry.path().into(), test_toml);
}
}
Ok(match test_suite_configs.is_empty() {
true => None,
false => Some(Self { test_suite_configs }),
})
}
}
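// Usage sketch (paths assumed for illustration, not from the commit): running
// discovery from a workspace root returns a map keyed by each Test.toml found
// while walking the tree, with hidden directories skipped:
//
//     let test_config = TestConfig::discover("tests/multiple-suites")?; // Option<TestConfig>
//     // e.g. keys: .../third-suite/Test.toml, .../another-suite/Test.toml
//
// `anchor test` later runs one suite per entry, each against its own validator.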
// This type needs to mirror the same (sub)structure as Anchor.toml
// so that an Anchor.toml can also be parsed and used as a base test file
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct _TestToml {
pub extends: Option<Vec<String>>,
pub test: Option<_TestValidator>,
pub scripts: Option<ScriptsConfig>,
}
impl _TestToml {
fn from_path(path: impl AsRef<Path>) -> Result<Self, Error> {
let s = fs::read_to_string(&path)?;
let parsed_toml: Self = toml::from_str(&s)?;
let mut current_toml = _TestToml {
extends: None,
test: None,
scripts: None,
};
if let Some(bases) = &parsed_toml.extends {
for base in bases {
let mut canonical_base = base.clone();
canonical_base = canonicalize_filepath_from_origin(&canonical_base, &path)?;
current_toml.merge(_TestToml::from_path(&canonical_base)?);
}
}
current_toml.merge(parsed_toml);
if let Some(test) = &mut current_toml.test {
if let Some(genesis_programs) = &mut test.genesis {
for entry in genesis_programs {
entry.program = canonicalize_filepath_from_origin(&entry.program, &path)?;
}
}
if let Some(validator) = &mut test.validator {
if let Some(ledger_dir) = &mut validator.ledger {
*ledger_dir = canonicalize_filepath_from_origin(&ledger_dir, &path)?;
}
if let Some(accounts) = &mut validator.account {
for entry in accounts {
entry.filename = canonicalize_filepath_from_origin(&entry.filename, &path)?;
}
}
}
}
Ok(current_toml)
}
}
/// Canonicalizes the `file_path` arg, using the parent directory of the
/// `path` arg as the working directory from which the relative path is
/// resolved into a canonical, absolute one.
fn canonicalize_filepath_from_origin(
file_path: impl AsRef<Path>,
path: impl AsRef<Path>,
) -> Result<String> {
let previous_dir = std::env::current_dir()?;
std::env::set_current_dir(path.as_ref().parent().unwrap())?;
let result = fs::canonicalize(file_path)?.display().to_string();
std::env::set_current_dir(previous_dir)?;
Ok(result)
}
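// Usage sketch (paths assumed for illustration): for a Test.toml containing
//     filename = "./accounts/SOME_TOKEN.json"
// the call
//     canonicalize_filepath_from_origin("./accounts/SOME_TOKEN.json", &test_toml_path)
// resolves the entry relative to the Test.toml's directory and returns it as an
// absolute path, restoring the previous working directory before returning.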
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestToml {
#[serde(skip_serializing_if = "Option::is_none")]
pub test: Option<TestValidator>,
pub scripts: ScriptsConfig,
}
impl TestToml {
pub fn from_path(p: impl AsRef<Path>) -> Result<Self> {
WithPath::new(_TestToml::from_path(&p)?, p.as_ref().into()).try_into()
}
}
impl Merge for _TestToml {
fn merge(&mut self, other: Self) {
let mut my_scripts = self.scripts.take();
match &mut my_scripts {
None => my_scripts = other.scripts,
Some(my_scripts) => {
if let Some(other_scripts) = other.scripts {
for (name, script) in other_scripts {
my_scripts.insert(name, script);
}
}
}
}
let mut my_test = self.test.take();
match &mut my_test {
Some(my_test) => {
if let Some(other_test) = other.test {
if let Some(startup_wait) = other_test.startup_wait {
my_test.startup_wait = Some(startup_wait);
}
if let Some(other_genesis) = other_test.genesis {
match &mut my_test.genesis {
Some(my_genesis) => {
for other_entry in other_genesis {
match my_genesis
.iter()
.position(|g| *g.address == other_entry.address)
{
None => my_genesis.push(other_entry),
Some(i) => my_genesis[i] = other_entry,
}
}
}
None => my_test.genesis = Some(other_genesis),
}
}
let mut my_validator = my_test.validator.take();
match &mut my_validator {
None => my_validator = other_test.validator,
Some(my_validator) => {
if let Some(other_validator) = other_test.validator {
my_validator.merge(other_validator)
}
}
}
my_test.validator = my_validator;
}
}
None => my_test = other.test,
};
// Instantiating a new Self object here ensures that this function will fail
// to compile if new fields get added to Self, which serves as a reminder to
// check whether they also require merging.
*self = Self {
test: my_test,
scripts: my_scripts,
extends: self.extends.take(),
};
}
}
impl TryFrom<WithPath<_TestToml>> for TestToml {
type Error = Error;
fn try_from(mut value: WithPath<_TestToml>) -> Result<Self, Self::Error> {
Ok(Self {
test: value.test.take().map(Into::into),
scripts: value
.scripts
.take()
.ok_or_else(|| anyhow!("Missing 'scripts' section in Test.toml file."))?,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -561,13 +803,13 @@ pub struct AccountEntry {
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct Validator {
pub struct _Validator {
// Load an account from the provided JSON file
#[serde(skip_serializing_if = "Option::is_none")]
pub account: Option<Vec<AccountEntry>>,
// IP address to bind the validator ports. [default: 0.0.0.0]
#[serde(default = "default_bind_address")]
pub bind_address: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub bind_address: Option<String>,
// Copy an account from the cluster referenced by the url argument.
#[serde(skip_serializing_if = "Option::is_none")]
pub clone: Option<Vec<CloneEntry>>,
@ -575,8 +817,8 @@ pub struct Validator {
#[serde(skip_serializing_if = "Option::is_none")]
pub dynamic_port_range: Option<String>,
// Enable the faucet on this port [default: 9900].
#[serde(default = "default_faucet_port")]
pub faucet_port: u16,
#[serde(skip_serializing_if = "Option::is_none")]
pub faucet_port: Option<u16>,
// Give the faucet address this much SOL in genesis. [default: 1000000]
#[serde(skip_serializing_if = "Option::is_none")]
pub faucet_sol: Option<String>,
@ -590,14 +832,14 @@ pub struct Validator {
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
// Use DIR as ledger location
#[serde(default = "default_ledger_path")]
pub ledger: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub ledger: Option<String>,
// Keep this amount of shreds in root slots. [default: 10000]
#[serde(skip_serializing_if = "Option::is_none")]
pub limit_ledger_size: Option<String>,
// Enable JSON RPC on this port, and the next port for the RPC websocket. [default: 8899]
#[serde(default = "default_rpc_port")]
pub rpc_port: u16,
#[serde(skip_serializing_if = "Option::is_none")]
pub rpc_port: Option<u16>,
// Override the number of slots in an epoch.
#[serde(skip_serializing_if = "Option::is_none")]
pub slots_per_epoch: Option<String>,
@ -606,20 +848,148 @@ pub struct Validator {
pub warp_slot: Option<String>,
}
fn default_ledger_path() -> String {
".anchor/test-ledger".to_string()
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct Validator {
#[serde(skip_serializing_if = "Option::is_none")]
pub account: Option<Vec<AccountEntry>>,
pub bind_address: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub clone: Option<Vec<CloneEntry>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub dynamic_port_range: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub faucet_port: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub faucet_sol: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub gossip_host: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub gossip_port: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
pub ledger: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub limit_ledger_size: Option<String>,
pub rpc_port: u16,
#[serde(skip_serializing_if = "Option::is_none")]
pub slots_per_epoch: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub warp_slot: Option<String>,
}
fn default_bind_address() -> String {
"0.0.0.0".to_string()
impl From<_Validator> for Validator {
fn from(_validator: _Validator) -> Self {
Self {
account: _validator.account,
bind_address: _validator
.bind_address
.unwrap_or_else(|| DEFAULT_BIND_ADDRESS.to_string()),
clone: _validator.clone,
dynamic_port_range: _validator.dynamic_port_range,
faucet_port: _validator.faucet_port,
faucet_sol: _validator.faucet_sol,
gossip_host: _validator.gossip_host,
gossip_port: _validator.gossip_port,
url: _validator.url,
ledger: _validator
.ledger
.unwrap_or_else(|| DEFAULT_LEDGER_PATH.to_string()),
limit_ledger_size: _validator.limit_ledger_size,
rpc_port: _validator
.rpc_port
.unwrap_or(solana_sdk::rpc_port::DEFAULT_RPC_PORT),
slots_per_epoch: _validator.slots_per_epoch,
warp_slot: _validator.warp_slot,
}
}
}
pub fn default_rpc_port() -> u16 {
solana_sdk::rpc_port::DEFAULT_RPC_PORT
impl From<Validator> for _Validator {
fn from(validator: Validator) -> Self {
Self {
account: validator.account,
bind_address: Some(validator.bind_address),
clone: validator.clone,
dynamic_port_range: validator.dynamic_port_range,
faucet_port: validator.faucet_port,
faucet_sol: validator.faucet_sol,
gossip_host: validator.gossip_host,
gossip_port: validator.gossip_port,
url: validator.url,
ledger: Some(validator.ledger),
limit_ledger_size: validator.limit_ledger_size,
rpc_port: Some(validator.rpc_port),
slots_per_epoch: validator.slots_per_epoch,
warp_slot: validator.warp_slot,
}
}
}
pub fn default_faucet_port() -> u16 {
solana_faucet::faucet::FAUCET_PORT
const DEFAULT_LEDGER_PATH: &str = ".anchor/test-ledger";
const DEFAULT_BIND_ADDRESS: &str = "0.0.0.0";
impl Merge for _Validator {
fn merge(&mut self, other: Self) {
// Instantiating a new Self object here ensures that this function will fail
// to compile if new fields get added to Self, which serves as a reminder to
// check whether they also require merging.
*self = Self {
account: match self.account.take() {
None => other.account,
Some(mut entries) => match other.account {
None => Some(entries),
Some(other_entries) => {
for other_entry in other_entries {
match entries
.iter()
.position(|my_entry| *my_entry.address == other_entry.address)
{
None => entries.push(other_entry),
Some(i) => entries[i] = other_entry,
};
}
Some(entries)
}
},
},
bind_address: other.bind_address.or_else(|| self.bind_address.take()),
clone: match self.clone.take() {
None => other.clone,
Some(mut entries) => match other.clone {
None => Some(entries),
Some(other_entries) => {
for other_entry in other_entries {
match entries
.iter()
.position(|my_entry| *my_entry.address == other_entry.address)
{
None => entries.push(other_entry),
Some(i) => entries[i] = other_entry,
};
}
Some(entries)
}
},
},
dynamic_port_range: other
.dynamic_port_range
.or_else(|| self.dynamic_port_range.take()),
faucet_port: other.faucet_port.or_else(|| self.faucet_port.take()),
faucet_sol: other.faucet_sol.or_else(|| self.faucet_sol.take()),
gossip_host: other.gossip_host.or_else(|| self.gossip_host.take()),
gossip_port: other.gossip_port.or_else(|| self.gossip_port.take()),
url: other.url.or_else(|| self.url.take()),
ledger: other.ledger.or_else(|| self.ledger.take()),
limit_ledger_size: other
.limit_ledger_size
.or_else(|| self.limit_ledger_size.take()),
rpc_port: other.rpc_port.or_else(|| self.rpc_port.take()),
slots_per_epoch: other
.slots_per_epoch
.or_else(|| self.slots_per_epoch.take()),
warp_slot: other.warp_slot.or_else(|| self.warp_slot.take()),
};
}
}
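// In short (a summary derived from the field-by-field logic above): scalar
// options from `other` (the more specific file) replace the ones on `self`,
// while the list-valued `account` and `clone` entries are merged by address,
// an `other` entry replacing any existing entry with the same address.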
#[derive(Debug, Clone)]

View File

@ -1,6 +1,6 @@
use crate::config::{
AnchorPackage, BootstrapMode, BuildConfig, Config, ConfigOverride, Manifest, ProgramDeployment,
ProgramWorkspace, Test, WithPath,
ProgramWorkspace, ScriptsConfig, TestValidator, WithPath, SHUTDOWN_WAIT, STARTUP_WAIT,
};
use anchor_client::Cluster;
use anchor_lang::idl::{IdlAccount, IdlInstruction};
@ -8,7 +8,6 @@ use anchor_lang::{AccountDeserialize, AnchorDeserialize, AnchorSerialize};
use anchor_syn::idl::Idl;
use anyhow::{anyhow, Context, Result};
use clap::Parser;
use config::{default_faucet_port, default_rpc_port};
use flate2::read::GzDecoder;
use flate2::read::ZlibDecoder;
use flate2::write::{GzEncoder, ZlibEncoder};
@ -1278,7 +1277,7 @@ fn verify(
.join("target/verifiable/")
.join(format!("{}.so", binary_name));
let url = cluster_url(&cfg);
let url = cluster_url(&cfg, &cfg.test_validator);
let bin_ver = verify_bin(program_id, &bin_path, &url)?;
if !bin_ver.is_verified {
println!("Error: Binaries don't match");
@ -1424,7 +1423,7 @@ pub enum BinVerificationState {
// Fetches an IDL for the given program_id.
fn fetch_idl(cfg_override: &ConfigOverride, idl_addr: Pubkey) -> Result<Idl> {
let url = match Config::discover(cfg_override)? {
Some(cfg) => cluster_url(&cfg),
Some(cfg) => cluster_url(&cfg, &cfg.test_validator),
None => {
// If the command is not run inside a workspace,
// cluster_url will be used from default solana config
@ -1538,7 +1537,7 @@ fn idl_set_buffer(cfg_override: &ConfigOverride, program_id: Pubkey, buffer: Pub
with_workspace(cfg_override, |cfg| {
let keypair = solana_sdk::signature::read_keypair_file(&cfg.provider.wallet.to_string())
.map_err(|_| anyhow!("Unable to read keypair file"))?;
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let client = RpcClient::new(url);
// Instruction to set the buffer onto the IdlAccount.
@ -1591,7 +1590,7 @@ fn idl_upgrade(
fn idl_authority(cfg_override: &ConfigOverride, program_id: Pubkey) -> Result<()> {
with_workspace(cfg_override, |cfg| {
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let client = RpcClient::new(url);
let idl_address = {
let account = client
@ -1629,7 +1628,7 @@ fn idl_set_authority(
};
let keypair = solana_sdk::signature::read_keypair_file(&cfg.provider.wallet.to_string())
.map_err(|_| anyhow!("Unable to read keypair file"))?;
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let client = RpcClient::new(url);
// Instruction data.
@ -1700,7 +1699,7 @@ fn idl_write(cfg: &Config, program_id: &Pubkey, idl: &Idl, idl_address: Pubkey)
// Misc.
let keypair = solana_sdk::signature::read_keypair_file(&cfg.provider.wallet.to_string())
.map_err(|_| anyhow!("Unable to read keypair file"))?;
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let client = RpcClient::new(url);
// Serialize and compress the idl.
@ -1841,95 +1840,156 @@ fn test(
if (!is_localnet || skip_local_validator) && !skip_deploy {
deploy(cfg_override, None)?;
}
// Start local test validator, if needed.
let mut validator_handle = None;
if is_localnet && (!skip_local_validator) {
let flags = match skip_deploy {
true => None,
false => Some(validator_flags(cfg)?),
};
validator_handle = Some(start_test_validator(cfg, flags, true)?);
let mut is_first_suite = true;
if cfg.scripts.get("test").is_some() {
is_first_suite = false;
println!("\nFound a 'test' script in the Anchor.toml. Running it as a test suite!");
run_test_suite(
cfg.path(),
cfg,
is_localnet,
skip_local_validator,
skip_deploy,
detach,
&cfg.test_validator,
&cfg.scripts,
&extra_args,
)?;
}
let url = cluster_url(cfg);
let node_options = format!(
"{} {}",
match std::env::var_os("NODE_OPTIONS") {
Some(value) => value
.into_string()
.map_err(std::env::VarError::NotUnicode)?,
None => "".to_owned(),
},
get_node_dns_option()?,
);
// Setup log reader.
let log_streams = stream_logs(cfg, &url);
// Run the tests.
let test_result: Result<_> = {
let cmd = cfg
.scripts
.get("test")
.expect("Not able to find command for `test`")
.clone();
let mut args: Vec<&str> = cmd
.split(' ')
.chain(extra_args.iter().map(|arg| arg.as_str()))
.collect();
let program = args.remove(0);
std::process::Command::new(program)
.args(args)
.env("ANCHOR_PROVIDER_URL", url)
.env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
.env("NODE_OPTIONS", node_options)
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.output()
.map_err(anyhow::Error::from)
.context(cmd)
};
// Keep validator running if needed.
if test_result.is_ok() && detach {
println!("Local validator still running. Press Ctrl + C quit.");
std::io::stdin().lock().lines().next().unwrap().unwrap();
}
// Check all errors and shut down.
if let Some(mut child) = validator_handle {
if let Err(err) = child.kill() {
println!("Failed to kill subprocess {}: {}", child.id(), err);
}
}
for mut child in log_streams? {
if let Err(err) = child.kill() {
println!("Failed to kill subprocess {}: {}", child.id(), err);
}
}
// This must run *after* shutting down the validator and log streams.
match test_result {
Ok(exit) => {
if !exit.status.success() {
std::process::exit(exit.status.code().unwrap());
if let Some(test_config) = &cfg.test_config {
for test_suite in test_config.iter() {
if !is_first_suite {
std::thread::sleep(std::time::Duration::from_millis(
test_suite
.1
.test
.as_ref()
.map(|val| val.shutdown_wait)
.unwrap_or(SHUTDOWN_WAIT) as u64,
));
} else {
is_first_suite = false;
}
}
Err(err) => {
println!("Failed to run test: {:#}", err)
run_test_suite(
test_suite.0,
cfg,
is_localnet,
skip_local_validator,
skip_deploy,
detach,
&test_suite.1.test,
&test_suite.1.scripts,
&extra_args,
)?;
}
}
Ok(())
})
}
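// Execution-order sketch (derived from the code above, not a comment from the
// original commit): if Anchor.toml defines a [scripts] `test` entry, it runs
// first as its own suite; each Test.toml found by TestConfig::discover then
// runs in turn, and before every suite after the first the runner sleeps that
// suite's shutdown_wait (default SHUTDOWN_WAIT ms) so the previous validator
// can shut down and release its ports.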
#[allow(clippy::too_many_arguments)]
fn run_test_suite(
test_suite_path: impl AsRef<Path>,
cfg: &WithPath<Config>,
is_localnet: bool,
skip_local_validator: bool,
skip_deploy: bool,
detach: bool,
test_validator: &Option<TestValidator>,
scripts: &ScriptsConfig,
extra_args: &[String],
) -> Result<()> {
println!("\nRunning test suite: {:#?}\n", test_suite_path.as_ref());
// Start local test validator, if needed.
let mut validator_handle = None;
if is_localnet && (!skip_local_validator) {
let flags = match skip_deploy {
true => None,
false => Some(validator_flags(cfg, test_validator)?),
};
validator_handle = Some(start_test_validator(cfg, test_validator, flags, true)?);
}
let url = cluster_url(cfg, test_validator);
let node_options = format!(
"{} {}",
match std::env::var_os("NODE_OPTIONS") {
Some(value) => value
.into_string()
.map_err(std::env::VarError::NotUnicode)?,
None => "".to_owned(),
},
get_node_dns_option()?,
);
// Setup log reader.
let log_streams = stream_logs(cfg, &url);
// Run the tests.
let test_result: Result<_> = {
let cmd = scripts
.get("test")
.expect("Not able to find script for `test`")
.clone();
let mut args: Vec<&str> = cmd
.split(' ')
.chain(extra_args.iter().map(|arg| arg.as_str()))
.collect();
let program = args.remove(0);
std::process::Command::new(program)
.args(args)
.env("ANCHOR_PROVIDER_URL", url)
.env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
.env("NODE_OPTIONS", node_options)
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.output()
.map_err(anyhow::Error::from)
.context(cmd)
};
// Keep validator running if needed.
if test_result.is_ok() && detach {
println!("Local validator still running. Press Ctrl + C quit.");
std::io::stdin().lock().lines().next().unwrap().unwrap();
}
// Check all errors and shut down.
if let Some(mut child) = validator_handle {
if let Err(err) = child.kill() {
println!("Failed to kill subprocess {}: {}", child.id(), err);
}
}
for mut child in log_streams? {
if let Err(err) = child.kill() {
println!("Failed to kill subprocess {}: {}", child.id(), err);
}
}
// This must run *after* shutting down the validator and log streams.
match test_result {
Ok(exit) => {
if !exit.status.success() {
std::process::exit(exit.status.code().unwrap());
}
}
Err(err) => {
println!("Failed to run test: {:#}", err)
}
}
Ok(())
}
// Returns the solana-test-validator flags. This will embed the workspace
// programs in the genesis block so we don't have to deploy every time. It also
// allows control of other solana-test-validator features.
fn validator_flags(cfg: &WithPath<Config>) -> Result<Vec<String>> {
fn validator_flags(
cfg: &WithPath<Config>,
test_validator: &Option<TestValidator>,
) -> Result<Vec<String>> {
let programs = cfg.programs.get(&Cluster::Localnet);
let mut flags = Vec::new();
@ -1959,7 +2019,7 @@ fn validator_flags(cfg: &WithPath<Config>) -> Result<Vec<String>> {
}
}
if let Some(test) = cfg.test.as_ref() {
if let Some(test) = test_validator.as_ref() {
if let Some(genesis) = &test.genesis {
for entry in genesis {
let program_path = Path::new(&entry.program);
@ -2090,7 +2150,7 @@ fn stream_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<std::proc
.spawn()?;
handles.push(child);
}
if let Some(test) = config.test.as_ref() {
if let Some(test) = config.test_validator.as_ref() {
if let Some(genesis) = &test.genesis {
for entry in genesis {
let log_file = File::create(format!("{}/{}.log", program_logs_dir, entry.address))?;
@ -2117,11 +2177,13 @@ pub struct IdlTestMetadata {
fn start_test_validator(
cfg: &Config,
test_validator: &Option<TestValidator>,
flags: Option<Vec<String>>,
test_log_stdout: bool,
) -> Result<Child> {
//
let (test_ledger_directory, test_ledger_log_filename) = test_validator_file_paths(cfg);
let (test_ledger_directory, test_ledger_log_filename) =
test_validator_file_paths(test_validator);
// Start a validator for testing.
let (test_validator_stdout, test_validator_stderr) = match test_log_stdout {
@ -2136,23 +2198,23 @@ fn start_test_validator(
false => (Stdio::inherit(), Stdio::inherit()),
};
let rpc_url = test_validator_rpc_url(cfg);
let rpc_url = test_validator_rpc_url(test_validator);
let rpc_port = cfg
.test
.test_validator
.as_ref()
.and_then(|test| test.validator.as_ref().map(|v| v.rpc_port))
.unwrap_or_else(default_rpc_port);
.unwrap_or(solana_sdk::rpc_port::DEFAULT_RPC_PORT);
if !portpicker::is_free(rpc_port) {
return Err(anyhow!(
"Your configured rpc port: {rpc_port} is already in use"
));
}
let faucet_port = cfg
.test
.test_validator
.as_ref()
.and_then(|test| test.validator.as_ref().map(|v| v.faucet_port))
.unwrap_or_else(default_faucet_port);
.and_then(|test| test.validator.as_ref().and_then(|v| v.faucet_port))
.unwrap_or(solana_faucet::faucet::FAUCET_PORT);
if !portpicker::is_free(faucet_port) {
return Err(anyhow!(
"Your configured faucet port: {faucet_port} is already in use"
@ -2173,11 +2235,10 @@ fn start_test_validator(
// Wait for the validator to be ready.
let client = RpcClient::new(rpc_url);
let mut count = 0;
let ms_wait = cfg
.test
let ms_wait = test_validator
.as_ref()
.and_then(|test| test.startup_wait)
.unwrap_or(5_000);
.map(|test| test.startup_wait)
.unwrap_or(STARTUP_WAIT);
while count < ms_wait {
let r = client.get_latest_blockhash();
if r.is_ok() {
@ -2199,9 +2260,9 @@ fn start_test_validator(
// Return the URL that solana-test-validator should be running on given the
// configuration
fn test_validator_rpc_url(cfg: &Config) -> String {
match &cfg.test.as_ref() {
Some(Test {
fn test_validator_rpc_url(test_validator: &Option<TestValidator>) -> String {
match test_validator {
Some(TestValidator {
validator: Some(validator),
..
}) => format!("http://{}:{}", validator.bind_address, validator.rpc_port),
@ -2211,9 +2272,9 @@ fn test_validator_rpc_url(cfg: &Config) -> String {
// Setup and return paths to the solana-test-validator ledger directory and log
// files given the configuration
fn test_validator_file_paths(cfg: &Config) -> (String, String) {
let ledger_directory = match &cfg.test.as_ref() {
Some(Test {
fn test_validator_file_paths(test_validator: &Option<TestValidator>) -> (String, String) {
let ledger_directory = match test_validator {
Some(TestValidator {
validator: Some(validator),
..
}) => &validator.ledger,
@ -2238,12 +2299,12 @@ fn test_validator_file_paths(cfg: &Config) -> (String, String) {
)
}
fn cluster_url(cfg: &Config) -> String {
fn cluster_url(cfg: &Config, test_validator: &Option<TestValidator>) -> String {
let is_localnet = cfg.provider.cluster == Cluster::Localnet;
match is_localnet {
// Cluster is Localnet, assume the intent is to use the configuration
// for solana-test-validator
true => test_validator_rpc_url(cfg),
true => test_validator_rpc_url(test_validator),
false => cfg.provider.cluster.url().to_string(),
}
}
@ -2278,7 +2339,7 @@ fn clean(cfg_override: &ConfigOverride) -> Result<()> {
fn deploy(cfg_override: &ConfigOverride, program_str: Option<String>) -> Result<()> {
with_workspace(cfg_override, |cfg| {
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let keypair = cfg.provider.wallet.to_string();
// Deploy the programs.
@ -2352,7 +2413,7 @@ fn upgrade(
let program_filepath = path.canonicalize()?.display().to_string();
with_workspace(cfg_override, |cfg| {
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let exit = std::process::Command::new("solana")
.arg("program")
.arg("deploy")
@ -2385,7 +2446,7 @@ fn create_idl_account(
let idl_address = IdlAccount::address(program_id);
let keypair = solana_sdk::signature::read_keypair_file(keypair_path)
.map_err(|_| anyhow!("Unable to read keypair file"))?;
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let client = RpcClient::new(url);
let idl_data = serialize_idl(idl)?;
@ -2439,7 +2500,7 @@ fn create_idl_buffer(
) -> Result<Pubkey> {
let keypair = solana_sdk::signature::read_keypair_file(keypair_path)
.map_err(|_| anyhow!("Unable to read keypair file"))?;
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let client = RpcClient::new(url);
let buffer = Keypair::generate(&mut OsRng);
@ -2513,7 +2574,7 @@ fn migrate(cfg_override: &ConfigOverride) -> Result<()> {
with_workspace(cfg_override, |cfg| {
println!("Running migration deploy script");
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let cur_dir = std::env::current_dir()?;
let use_ts =
@ -2669,7 +2730,7 @@ fn shell(cfg_override: &ConfigOverride) -> Result<()> {
.collect::<Vec<ProgramWorkspace>>(),
}
};
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let js_code = template::node_shell(&url, &cfg.provider.wallet.to_string(), programs)?;
let mut child = std::process::Command::new("node")
.args(&["-e", &js_code, "-i", "--experimental-repl-await"])
@ -2688,7 +2749,7 @@ fn shell(cfg_override: &ConfigOverride) -> Result<()> {
fn run(cfg_override: &ConfigOverride, script: String) -> Result<()> {
with_workspace(cfg_override, |cfg| {
let url = cluster_url(cfg);
let url = cluster_url(cfg, &cfg.test_validator);
let script = cfg
.scripts
.get(&script)
@ -2955,13 +3016,13 @@ fn localnet(
let flags = match skip_deploy {
true => None,
false => Some(validator_flags(cfg)?),
false => Some(validator_flags(cfg, &cfg.test_validator)?),
};
let validator_handle = &mut start_test_validator(cfg, flags, false)?;
let validator_handle = &mut start_test_validator(cfg, &cfg.test_validator, flags, false)?;
// Setup log reader.
let url = test_validator_rpc_url(cfg);
let url = test_validator_rpc_url(&cfg.test_validator);
let log_streams = stream_logs(cfg, &url);
std::io::stdin().lock().lines().next().unwrap().unwrap();

View File

@ -0,0 +1,18 @@
[features]
seeds = false
[programs.localnet]
multiple_suites = "Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"
[registry]
url = "https://anchor.projectserum.com"
[provider]
cluster = "localnet"
wallet = "~/.config/solana/id.json"
[test]
startup_wait = 20000
[[test.validator.account]]
address = "C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw"
filename = "./tests/accounts/SOME_ACCOUNT.json"

View File

@ -0,0 +1,4 @@
[workspace]
members = [
"programs/*"
]

View File

@ -0,0 +1,12 @@
// Migrations are an early feature. Currently, they're nothing more than this
// single deploy script that's invoked from the CLI, injecting a provider
// configured from the workspace's Anchor.toml.
const anchor = require("@project-serum/anchor");
module.exports = async function (provider) {
// Configure client to use the provider.
anchor.setProvider(provider);
// Add your deploy script here.
};

View File

@ -0,0 +1,19 @@
{
"name": "multiple-suites",
"version": "0.23.0",
"license": "(MIT OR Apache-2.0)",
"homepage": "https://github.com/project-serum/anchor#readme",
"bugs": {
"url": "https://github.com/project-serum/anchor/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/project-serum/anchor.git"
},
"engines": {
"node": ">=11"
},
"scripts": {
"test": "anchor test"
}
}

View File

@ -0,0 +1,19 @@
[package]
name = "multiple-suites"
version = "0.1.0"
description = "Created with Anchor"
edition = "2018"
[lib]
crate-type = ["cdylib", "lib"]
name = "multiple_suites"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
default = []
[dependencies]
anchor-lang = "0.23.0"

View File

@ -0,0 +1,2 @@
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -0,0 +1,16 @@
use anchor_lang::prelude::*;
declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS");
#[program]
pub mod multiple_suites {
use super::*;
// _val ensures the transactions differ so they don't get rejected as duplicates.
pub fn initialize(_ctx: Context<Initialize>, _val: u64) -> Result<()> {
Ok(())
}
}
#[derive(Accounts)]
pub struct Initialize {}

View File

@ -0,0 +1,5 @@
extends = ["./Test.root.base.toml"]
[[test.validator.account]]
address = "C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw"
filename = "./accounts/SOME_TOKEN.json"

View File

@ -0,0 +1,12 @@
[test]
startup_wait = 20000
[test.validator]
url = "https://api.mainnet-beta.solana.com"
[[test.validator.clone]]
address = "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s"
[[test.validator.account]]
address = "JC7Vcye5upE6tMLAjAem76MCGuPNidTtg2cuYm71UukH"
filename = "./accounts/ANOTHER_ACC.json"

View File

@ -0,0 +1,13 @@
{
"pubkey": "JC7Vcye5upE6tMLAjAem76MCGuPNidTtg2cuYm71UukH",
"account": {
"lamports": 1141440,
"data": [
"AgAAALd5VermN3GAImByma8xRkcfPsbJkljqjdkZwDg8igZy",
"base64"
],
"owner": "BPFLoaderUpgradeab1e11111111111111111111111",
"executable": true,
"rentEpoch": 211
}
}

View File

@ -0,0 +1,13 @@
{
"pubkey": "3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj",
"account": {
"lamports": 7906560,
"data": [
"oZzT/fpANfoAAAacjYHHz87iGrP2++EgvfsQjuGvj1dqAHoCKU9FPDLg9bWjnUCkIZgQQSmJpClN5jbVq6JbdM4Ec4ZpEb2ViTzO//sSNXmZhK5OA5kiKWvPWoUsXExQpNgiaEeW8MWANStiNrxKs/p6yWv9cqbJBd6AygqKx0Y7WysO04FMaSFa/6o5Lt/NyPH8jkM12IYJcvGzIQIbaXRo9eDbJOcPskyugDoJAAAAAAAAAAAAAAAAAAAAAAAAAAAAFQMAAAAAAAB4AQAAAAAAAHIDAAAAAAAAcgMAAAAAAAABAAAAAAAAAAjqplnVZa4ZML82vxalcFuNRlmUiSpI+ZGRVoMP+eF1hDOHYqnPMZXS8rUwGGcq0p98MYBGv4V+9NsD84s31F0BRTMAAAAAAAAAAACAOgkAAAAAAPySPwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8Cd4FAAAAAL4E7wIAAAAA6pstAQAAAAAAAR0AAAAAALHFQ2IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAabiFf+q4GE+2h/Y0YYwDXaxDncGus7VZig8AAAAAABb8MVpWsIyg7EhPN8Vhh8GnNx3+zvD0Kz0wqIECi79vwBAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"base64"
],
"owner": "7NryUmAmy8hMbawRNUs9UdkgAGEiHFxghSfve31dC7xh",
"executable": false,
"rentEpoch": 292
}
}

View File

@ -0,0 +1,13 @@
{
"pubkey": "C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw",
"account": {
"lamports": 839123197950,
"data": [
"AAAAAFnkpzMo+KIHXFu0C7POimfWZAwz81Y+ImohwO+lC39o2tFLxYmAIwAGAQAAAABZ5KczKPiiB1xbtAuzzopn1mQMM/NWPiJqIcDvpQt/aA==",
"base64"
],
"owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA",
"executable": false,
"rentEpoch": 292
}
}

View File

@ -0,0 +1,4 @@
extends = ["../Test.base.toml"]
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/another-suite/**/*.ts"

View File

@ -0,0 +1,44 @@
import * as anchor from "@project-serum/anchor";
import { Program } from "@project-serum/anchor";
import { PublicKey } from "@solana/web3.js";
import { assert } from "chai";
import { MultipleSuites } from "../../target/types/multiple_suites";
describe("multiple-suites", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.Provider.env());
const program = anchor.workspace.MultipleSuites as Program<MultipleSuites>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.rpc.initialize(new anchor.BN(100000), {});
// SOME_TOKEN.json should exist.
const SOME_TOKEN = await program.provider.connection.getAccountInfo(
new PublicKey("C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw")
);
// SOME_ACCOUNT.json should NOT exist.
const SOME_ACCOUNT = await program.provider.connection.getAccountInfo(
new PublicKey("3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj")
);
// ANOTHER_ACC.json should exist.
const ANOTHER_ACC = await program.provider.connection.getAccountInfo(
new PublicKey("JC7Vcye5upE6tMLAjAem76MCGuPNidTtg2cuYm71UukH")
);
// CLONED ACC should exist.
const CLONED_ACC = await program.provider.connection.getAccountInfo(
new PublicKey("metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s")
);
assert.isNotNull(SOME_TOKEN);
assert.isNull(SOME_ACCOUNT);
assert.isNotNull(ANOTHER_ACC);
assert.isNotNull(CLONED_ACC);
console.log("Your transaction signature", tx);
});
});

View File

@ -0,0 +1,4 @@
extends = ["../../Test.base.toml"]
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/fourth-and-fifth-suite/fifth-suite/**/*.ts"

View File

@ -0,0 +1,43 @@
import * as anchor from "@project-serum/anchor";
import { Program } from "@project-serum/anchor";
import { PublicKey } from "@solana/web3.js";
import { assert } from "chai";
import { MultipleSuites } from "../../../target/types/multiple_suites";
describe("multiple-suites", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.Provider.env());
const program = anchor.workspace.MultipleSuites as Program<MultipleSuites>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.rpc.initialize(new anchor.BN(4389242), {});
// SOME_TOKEN.json should exist.
const SOME_TOKEN = await program.provider.connection.getAccountInfo(
new PublicKey("C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw")
);
// SOME_ACCOUNT.json should NOT exist.
const SOME_ACCOUNT = await program.provider.connection.getAccountInfo(
new PublicKey("3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj")
);
// ANOTHER_ACC.json should exist.
const ANOTHER_ACC = await program.provider.connection.getAccountInfo(
new PublicKey("JC7Vcye5upE6tMLAjAem76MCGuPNidTtg2cuYm71UukH")
);
// CLONED ACC should exist.
const CLONED_ACC = await program.provider.connection.getAccountInfo(
new PublicKey("metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s")
);
assert.isNotNull(SOME_TOKEN);
assert.isNull(SOME_ACCOUNT);
assert.isNotNull(ANOTHER_ACC);
assert.isNotNull(CLONED_ACC);
console.log("Your transaction signature", tx);
});
});

View File

@ -0,0 +1,4 @@
extends = ["../../Test.base.toml"]
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/fourth-and-fifth-suite/forth-suite/**/*.ts"

View File

@ -0,0 +1,43 @@
import * as anchor from "@project-serum/anchor";
import { Program } from "@project-serum/anchor";
import { PublicKey } from "@solana/web3.js";
import { assert } from "chai";
import { MultipleSuites } from "../../../target/types/multiple_suites";
describe("multiple-suites", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.Provider.env());
const program = anchor.workspace.MultipleSuites as Program<MultipleSuites>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.rpc.initialize(new anchor.BN(34823), {});
// SOME_TOKEN.json should exist.
const SOME_TOKEN = await program.provider.connection.getAccountInfo(
new PublicKey("C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw")
);
// SOME_ACCOUNT.json should NOT exist.
const SOME_ACCOUNT = await program.provider.connection.getAccountInfo(
new PublicKey("3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj")
);
// ANOTHER_ACC.json should exist.
const ANOTHER_ACC = await program.provider.connection.getAccountInfo(
new PublicKey("JC7Vcye5upE6tMLAjAem76MCGuPNidTtg2cuYm71UukH")
);
// CLONED ACC should exist.
const CLONED_ACC = await program.provider.connection.getAccountInfo(
new PublicKey("metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s")
);
assert.isNotNull(SOME_TOKEN);
assert.isNull(SOME_ACCOUNT);
assert.isNotNull(ANOTHER_ACC);
assert.isNotNull(CLONED_ACC);
console.log("Your transaction signature", tx);
});
});

View File

@ -0,0 +1,4 @@
extends = ["../../Anchor.toml"]
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/multiple-suites/**/*.ts"

View File

@ -0,0 +1,32 @@
import * as anchor from "@project-serum/anchor";
import { Program } from "@project-serum/anchor";
import { PublicKey } from "@solana/web3.js";
import { assert } from "chai";
import { MultipleSuites } from "../../target/types/multiple_suites";
describe("multiple-suites", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.Provider.env());
const program = anchor.workspace.MultipleSuites as Program<MultipleSuites>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.rpc.initialize(new anchor.BN(2394832), {});
// SOME_TOKEN.json should NOT exist.
const SOME_TOKEN = await program.provider.connection.getAccountInfo(
new PublicKey("C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw")
);
// SOME_ACCOUNT.json should exist.
const SOME_ACCOUNT = await program.provider.connection.getAccountInfo(
new PublicKey("3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj")
);
assert.isNull(SOME_TOKEN);
assert.isNotNull(SOME_ACCOUNT);
console.log("Your transaction signature", tx);
});
});

View File

@ -0,0 +1,4 @@
extends = ["../Test.base.toml"]
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/third-suite/**/*.ts"

View File

@ -0,0 +1,32 @@
import * as anchor from "@project-serum/anchor";
import { Program } from "@project-serum/anchor";
import { PublicKey } from "@solana/web3.js";
import { MultipleSuites } from "../../../target/types/multiple_suites";
import { assert } from "chai";
describe("multiple-suites", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.Provider.env());
const program = anchor.workspace.MultipleSuites as Program<MultipleSuites>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.rpc.initialize(new anchor.BN(347234), {});
// SOME_TOKEN.json should exist.
const SOME_TOKEN = await program.provider.connection.getAccountInfo(
new PublicKey("C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw")
);
// SOME_ACCOUNT.json should NOT exist.
const SOME_ACCOUNT = await program.provider.connection.getAccountInfo(
new PublicKey("3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj")
);
assert.isNotNull(SOME_TOKEN);
assert.isNull(SOME_ACCOUNT);
console.log("Your transaction signature", tx);
});
});

View File

@ -0,0 +1,32 @@
import * as anchor from "@project-serum/anchor";
import { Program } from "@project-serum/anchor";
import { PublicKey } from "@solana/web3.js";
import { assert } from "chai";
import { MultipleSuites } from "../../../target/types/multiple_suites";
describe("multiple-suites", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.Provider.env());
const program = anchor.workspace.MultipleSuites as Program<MultipleSuites>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.rpc.initialize(new anchor.BN(9348239), {});
// SOME_TOKEN.json should exist.
const SOME_TOKEN = await program.provider.connection.getAccountInfo(
new PublicKey("C4XeBpzX4tDjGV1gkLsj7jJh6XHunVqAykANWCfTLszw")
);
// SOME_ACCOUNT.json should NOT exist.
const SOME_ACCOUNT = await program.provider.connection.getAccountInfo(
new PublicKey("3vMPj13emX9JmifYcWc77ekEzV1F37ga36E1YeSr6Mdj")
);
assert.isNotNull(SOME_TOKEN);
assert.isNull(SOME_ACCOUNT);
console.log("Your transaction signature", tx);
});
});

View File

@ -0,0 +1,10 @@
{
"compilerOptions": {
"types": ["mocha", "chai"],
"typeRoots": ["./node_modules/@types"],
"lib": ["es2015"],
"module": "commonjs",
"target": "es6",
"esModuleInterop": true
}
}

View File

@ -32,7 +32,8 @@
"validator-clone",
"zero-copy",
"declare-id",
"cpi-returns"
"cpi-returns",
"multiple-suites"
],
"dependencies": {
"@project-serum/anchor": "^0.23.0",