/// ...
/// }
/// }
///
/// mod tests {
/// // Because of macro syntax limitations, benches can't be paths but must
/// // be idents in the scope of `impl_benchmark_test_suite`.
/// use crate::benches;
///
/// impl_benchmark_test_suite!(new_test_ext(), benchmarks_path = benches);
///
/// // `new_test_ext` is defined later in this module.
/// }
/// ```
///
/// There is an optional 3rd argument, with keyword syntax: `extra = true` or
/// `extra = false`. By default, this generates a test suite which iterates over
/// all benchmarks, including those marked with the `#[extra]` annotation.
/// Setting `extra = false` excludes those.
///
/// There is an optional 4th argument, with keyword syntax: `exec_name =
/// custom_exec_name`. By default, this macro uses `execute_with` for this
/// parameter. This argument, if set, is subject to these restrictions:
///
/// - It must be the name of a method applied to the output of the
/// `new_test_ext` argument.
/// - That method must have a signature capable of receiving a single argument
/// of the form `impl FnOnce()`.
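///
/// For example, a hedged sketch that sets both keyword arguments (here
/// `run_with` stands in for a hypothetical method on your test externalities
/// type that accepts an `impl FnOnce()` closure; it is not part of this
/// crate):
///
/// ```ignore
/// impl_benchmark_test_suite!(
/// 	new_test_ext(),
/// 	benchmarks_path = benches,
/// 	extra = false,
/// 	exec_name = run_with,
/// );
/// ```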
#[macro_export]
macro_rules! impl_benchmark_test_suite {
// user might or might not have set some keyword arguments; set the defaults
//
// The weird syntax indicates that `rest` comes only after a comma, which is otherwise optional
(
		$new_test_ext:expr
$(, $( $rest:tt )* )?
) => {
impl_benchmark_test_suite!(
@selected:
$new_test_ext,
benchmarks_path = super,
extra = true,
exec_name = execute_with,
@user:
$( $( $rest )* )?
);
};
// pick off the benchmarks_path keyword argument
(
@selected:
$new_test_ext:expr,
benchmarks_path = $old:ident,
extra = $extra:expr,
exec_name = $exec_name:ident,
@user:
benchmarks_path = $benchmarks_path:ident
$(, $( $rest:tt )* )?
) => {
impl_benchmark_test_suite!(
@selected:
$new_test_ext,
benchmarks_path = $benchmarks_path,
extra = $extra,
exec_name = $exec_name,
@user:
$( $( $rest )* )?
);
};
// pick off the extra keyword argument
(
@selected:
$new_test_ext:expr,
benchmarks_path = $benchmarks_path:ident,
extra = $old:expr,
exec_name = $exec_name:ident,
@user:
extra = $extra:expr
$(, $( $rest:tt )* )?
) => {
impl_benchmark_test_suite!(
@selected:
$new_test_ext,
benchmarks_path = $benchmarks_path,
extra = $extra,
exec_name = $exec_name,
@user:
$( $( $rest )* )?
);
};
// pick off the exec_name keyword argument
(
@selected:
$new_test_ext:expr,
benchmarks_path = $benchmarks_path:ident,
extra = $extra:expr,
exec_name = $old:ident,
@user:
exec_name = $exec_name:ident
$(, $( $rest:tt )* )?
) => {
impl_benchmark_test_suite!(
@selected:
$new_test_ext,
benchmarks_path = $benchmarks_path,
extra = $extra,
exec_name = $exec_name,
@user:
$( $( $rest )* )?
);
};
// all options set; nothing else in user-provided keyword arguments
(
@selected:
$new_test_ext:expr,
benchmarks_path = $path_to_benchmarks_invocation:ident,
extra = $extra:expr,
exec_name = $exec_name:ident,
@user:
$(,)?
) => {
#[cfg(test)]
mod benchmark_tests {
use $path_to_benchmarks_invocation::test_bench_by_name;
use super::*;
#[test]
fn test_benchmarks() {
$new_test_ext.$exec_name(|| {
use $crate::Benchmarking;
let mut anything_failed = false;
println!("failing benchmark tests:");
for benchmark_name in $path_to_benchmarks_invocation::Benchmark::benchmarks($extra) {
match std::panic::catch_unwind(|| test_bench_by_name(benchmark_name)) {
Err(err) => {
println!("{}: {:?}", String::from_utf8_lossy(benchmark_name), err);
anything_failed = true;
},
Ok(Err(err)) => {
println!("{}: {}", String::from_utf8_lossy(benchmark_name), err);
anything_failed = true;
},
Ok(Ok(_)) => (),
}
}
assert!(!anything_failed);
});
}
}
};
}
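
// The keyword-argument handling above is an incremental "tt muncher" with
// defaults: the entry arm installs default values behind the `@selected:`
// marker, and each subsequent arm peels one `key = value` pair off the
// `@user:` token list, overwriting the corresponding default before
// recursing. A minimal standalone sketch of the same pattern, assuming a
// single `width` option (the macro name and option are illustrative, not
// part of this crate):
//
// macro_rules! with_defaults {
//     // Override the `width` default with a user-supplied value.
//     (@selected: width = $_old:expr, @user: width = $width:expr $(, $( $rest:tt )* )?) => {
//         with_defaults!(@selected: width = $width, @user: $( $( $rest )* )?);
//     };
//     // Nothing left to consume: emit code using the final value.
//     (@selected: width = $width:expr, @user: $(,)?) => {
//         const WIDTH: usize = $width;
//     };
//     // Entry point: install the defaults, then process the user's tokens.
//     // This arm matches anything, so it must come after the `@selected:` arms.
//     ( $( $user:tt )* ) => {
//         with_defaults!(@selected: width = 80, @user: $( $user )*);
//     };
// }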
/// Show an error message and debugging info for the case of an error
/// happening during a benchmark.
#[allow(clippy::too_many_arguments)]
pub fn show_benchmark_debug_info(
instance_string: &[u8],
benchmark: &[u8],
lowest_range_values: &[u32],
highest_range_values: &[u32],
steps: &[u32],
repeat: &u32,
verify: &bool,
error_message: &str,
) -> sp_runtime::RuntimeString {
sp_runtime::format_runtime_string!(
"\n* Pallet: {}\n\
* Benchmark: {}\n\
* Lowest_range_values: {:?}\n\
* Highest_range_values: {:?}\n\
* Steps: {:?}\n\
* Repeat: {:?}\n\
* Verify: {:?}\n\
* Error message: {}",
		sp_std::str::from_utf8(instance_string).expect("it's all just strings run through the wasm interface. qed"),
		sp_std::str::from_utf8(benchmark).expect("it's all just strings run through the wasm interface. qed"),
lowest_range_values,
highest_range_values,
steps,
repeat,
verify,
error_message,
)
}
/// This macro adds pallet benchmarks to a `Vec<BenchmarkBatch>` object.
///
/// First, create an object that holds the input parameters for the benchmark
/// (the tuple shape follows the `let (config, whitelist) = $params;`
/// destructuring in the macro below):
///
/// ```ignore
/// let params = (&config, &whitelist);
/// ```
///
/// The `whitelist` is a parameter you pass to control the DB read/write
/// tracking. We use a vector of
/// [TrackedStorageKey](./struct.TrackedStorageKey.html), which is a simple
/// struct used to set if a key has been read or written to.
///
/// For values that should be skipped entirely, we can just pass `key.into()`.
/// For example:
///
/// ```
/// use frame_benchmarking::TrackedStorageKey;
/// let whitelist: Vec<TrackedStorageKey> = vec![
/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
/// ];
/// ```
///
/// Then define a mutable local variable to hold your `BenchmarkBatch` object:
///
/// ```ignore
/// let mut batches = Vec::<BenchmarkBatch>::new();
/// ```
///
/// Then add the pallets you want to benchmark to this object, using their crate
/// name and generated module struct:
///
/// ```ignore
/// add_benchmark!(params, batches, pallet_balances, Balances);
/// add_benchmark!(params, batches, pallet_session, SessionBench::<Runtime>);
/// add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
/// ```
///
/// At the end of `dispatch_benchmark`, you should return this batches object.
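///
/// A hedged sketch of how these pieces typically fit together in a runtime's
/// `dispatch_benchmark` (the single whitelisted key, the pallet list, and the
/// error message are illustrative assumptions, not prescribed by this macro):
///
/// ```ignore
/// fn dispatch_benchmark(
/// 	config: frame_benchmarking::BenchmarkConfig,
/// ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
/// 	use frame_benchmarking::{add_benchmark, BenchmarkBatch, TrackedStorageKey};
///
/// 	// Keys excluded from DB read/write tracking, as described above.
/// 	let whitelist: Vec<TrackedStorageKey> = vec![
/// 		// Block Number
/// 		hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
/// 	];
///
/// 	let mut batches = Vec::<BenchmarkBatch>::new();
/// 	let params = (&config, &whitelist);
///
/// 	add_benchmark!(params, batches, pallet_balances, Balances);
///
/// 	if batches.is_empty() {
/// 		return Err("Benchmark not found for this pallet.".into())
/// 	}
/// 	Ok(batches)
/// }
/// ```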
#[macro_export]
macro_rules! add_benchmark {
( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => (
let name_string = stringify!($name).as_bytes();
let instance_string = stringify!( $( $location )* ).as_bytes();
let (config, whitelist) = $params;
let $crate::BenchmarkConfig {
pallet,
benchmark,
lowest_range_values,
highest_range_values,
steps,
repeat,
verify,
extra,
} = config;
if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] {
if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] {
for benchmark in $( $location )*::Benchmark::benchmarks(*extra).into_iter() {
				$batches.push($crate::BenchmarkBatch {
					pallet: name_string.to_vec(),
					instance: instance_string.to_vec(),
					benchmark: benchmark.to_vec(),
					results: $( $location )*::Benchmark::run_benchmark(
benchmark,
&lowest_range_values[..],
&highest_range_values[..],
&steps[..],
).map_err(|e| {
$crate::show_benchmark_debug_info(
instance_string,
benchmark,
lowest_range_values,
highest_range_values,
steps,
repeat,
verify,
e,
)
})?,
});
}
} else {
$batches.push($crate::BenchmarkBatch {
				pallet: name_string.to_vec(),
				instance: instance_string.to_vec(),
				benchmark: benchmark.clone(),
				results: $( $location )*::Benchmark::run_benchmark(
&benchmark[..],
&lowest_range_values[..],
&highest_range_values[..],
&steps[..],
).map_err(|e| {
$crate::show_benchmark_debug_info(
instance_string,
benchmark,
lowest_range_values,
highest_range_values,
steps,
repeat,
verify,
e,
)
})?,