Merge #7730: Remove priority estimation

0bd581a add release notes for removal of priority estimation (Alex Morcos)
b2322e0 Remove priority estimation (Alex Morcos)
Wladimir J. van der Laan 2016-11-07 13:23:20 +01:00
commit 3c03dc2cfc
GPG Key ID: 74810B012346C9A6
6 changed files with 110 additions and 251 deletions

doc/release-notes.md

@@ -48,6 +48,15 @@ Low-level RPC changes
an optional third arg, which was always ignored. Make sure to never pass more
than two arguments.
Removal of Priority Estimation
------------------------------
- Estimation of the "priority" needed for a transaction to be included within a target
  number of blocks has been removed. The RPC calls are deprecated and will either
  return -1 or 1e24 as appropriate. The format of fee_estimates.dat has also
  changed so that priority estimates are no longer saved. The file is automatically
  converted to the new format, which is not readable by prior versions of the
  software.
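To make the new behavior concrete, here is a minimal standalone sketch of the deprecated semantics (simplified free functions, not the real CBlockPolicyEstimator methods shown in the fees.cpp hunks below; INF_PRIORITY is the policy/fees.h constant, roughly 1e24):

#include <cstdio>

// 1e9 * MAX_MONEY in satoshis, about 2.1e24: the "1e24" mentioned above
static const double INF_PRIORITY = 1e9 * 2100000000000000.0;

// DEPRECATED: always reports failure
double estimatePriority(int /*confTarget*/) { return -1; }

// DEPRECATED: -1, except when the mempool is limited and no free txs get in at all
double estimateSmartPriority(int confTarget, int* answerFoundAtTarget, bool fMempoolLimited)
{
    if (answerFoundAtTarget)
        *answerFoundAtTarget = confTarget;
    return fMempoolLimited ? INF_PRIORITY : -1;
}

int main()
{
    int found;
    std::printf("%g %g\n", estimatePriority(2), estimateSmartPriority(2, &found, true));
}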
0.14.0 Change log
=================

src/policy/fees.cpp

@@ -14,10 +14,9 @@
#include "util.h"
void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets,
unsigned int maxConfirms, double _decay, std::string _dataTypeString)
unsigned int maxConfirms, double _decay)
{
decay = _decay;
dataTypeString = _dataTypeString;
for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
buckets.push_back(defaultBuckets[i]);
bucketMap[defaultBuckets[i]] = i;
@@ -87,10 +86,10 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
int maxbucketindex = buckets.size() - 1;
// requireGreater means we are looking for the lowest fee/priority such that all higher
// values pass, so we start at maxbucketindex (highest fee) and look at successively
// requireGreater means we are looking for the lowest feerate such that all higher
// values pass, so we start at maxbucketindex (highest feerate) and look at successively
// smaller buckets until we reach failure. Otherwise, we are looking for the highest
// fee/priority such that all lower values fail, and we go in the opposite direction.
// feerate such that all lower values fail, and we go in the opposite direction.
unsigned int startbucket = requireGreater ? maxbucketindex : 0;
int step = requireGreater ? -1 : 1;
@@ -107,7 +106,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
bool foundAnswer = false;
unsigned int bins = unconfTxs.size();
// Start counting from highest(default) or lowest fee/pri transactions
// Start counting from highest (default) or lowest feerate transactions
for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
curFarBucket = bucket;
nConf += confAvg[confTarget - 1][bucket];
@@ -145,8 +144,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
double median = -1;
double txSum = 0;
// Calculate the "average" fee of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average fee from that bucket
// Calculate the "average" feerate of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average feerate from that bucket
// This is a compromise between finding the median, which we can't do since we don't save all tx's,
// and reporting the average, which is less accurate
unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
@@ -166,8 +165,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
}
}
LogPrint("estimatefee", "%3d: For conf success %s %4.2f need %s %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
confTarget, requireGreater ? ">" : "<", successBreakPoint, dataTypeString,
LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
confTarget, requireGreater ? ">" : "<", successBreakPoint,
requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
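The comment above describes a compromise: locate the bucket that holds the median transaction of the passing range, then report that bucket's average feerate. A standalone toy illustration (invented numbers, not the real TxConfirmStats state):

#include <cstdio>
#include <vector>

int main()
{
    // Toy passing range: decayed tx counts and summed feerates per bucket
    std::vector<double> txCtAvg = {40, 10, 50};
    std::vector<double> sumFee  = {400000, 210000, 2500000};
    double txSum = 0;
    for (double c : txCtAvg) txSum += c; // 100 txs in the range
    txSum /= 2;                          // the median is the 50th
    for (size_t i = 0; i < txCtAvg.size(); i++) {
        if (txCtAvg[i] < txSum) { txSum -= txCtAvg[i]; continue; }
        // The median tx falls in bucket i: report that bucket's average feerate
        std::printf("estimate: %g\n", sumFee[i] / txCtAvg[i]); // 210000/10 = 21000
        break;
    }
}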
@@ -200,10 +199,10 @@ void TxConfirmStats::Read(CAutoFile& filein)
filein >> fileBuckets;
numBuckets = fileBuckets.size();
if (numBuckets <= 1 || numBuckets > 1000)
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 fee/pri buckets");
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
filein >> fileAvg;
if (fileAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri average bucket count");
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
filein >> fileTxCtAvg;
if (fileTxCtAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
@@ -213,9 +212,9 @@ void TxConfirmStats::Read(CAutoFile& filein)
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
for (unsigned int i = 0; i < maxConfirms; i++) {
if (fileConfAvg[i].size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri conf average bucket count");
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
}
// Now that we've processed the entire fee estimate data file and not
// Now that we've processed the entire feerate estimate data file and not
// thrown any errors, we can copy it to our data structures
decay = fileDecay;
buckets = fileBuckets;
@@ -242,8 +241,8 @@ void TxConfirmStats::Read(CAutoFile& filein)
for (unsigned int i = 0; i < buckets.size(); i++)
bucketMap[buckets[i]] = i;
LogPrint("estimatefee", "Reading estimates: %u %s buckets counting confirms up to %u blocks\n",
numBuckets, dataTypeString, maxConfirms);
LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
numBuckets, maxConfirms);
}
unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
@@ -251,7 +250,6 @@ unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
unsigned int blockIndex = nBlockHeight % unconfTxs.size();
unconfTxs[blockIndex][bucketindex]++;
LogPrint("estimatefee", "adding to %s", dataTypeString);
return bucketindex;
}
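NewTx() above resolves a feerate to its bucket through bucketMap: each key is a bucket's inclusive upper bound, so std::map::lower_bound() lands on the first bucket that can hold the value. A sketch with toy boundaries:

#include <cstdio>
#include <map>

int main()
{
    std::map<double, unsigned int> bucketMap; // upper bound -> bucket index
    const double bounds[] = {1000.0, 1100.0, 1210.0, 1e18 /* catch-all */};
    for (unsigned int i = 0; i < 4; i++)
        bucketMap[bounds[i]] = i;
    std::printf("%u\n", bucketMap.lower_bound(1050.0)->second); // 1: (1000, 1100]
    std::printf("%u\n", bucketMap.lower_bound(5e6)->second);    // 3: the catch-all
}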
@@ -291,12 +289,10 @@ void CBlockPolicyEstimator::removeTx(uint256 hash)
hash.ToString().c_str());
return;
}
TxConfirmStats *stats = pos->second.stats;
unsigned int entryHeight = pos->second.blockHeight;
unsigned int bucketIndex = pos->second.bucketIndex;
if (stats != NULL)
stats->removeTx(entryHeight, nBestSeenHeight, bucketIndex);
feeStats.removeTx(entryHeight, nBestSeenHeight, bucketIndex);
mapMemPoolTxs.erase(hash);
}
@@ -309,45 +305,14 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee)
vfeelist.push_back(bucketBoundary);
}
vfeelist.push_back(INF_FEERATE);
feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "FeeRate");
minTrackedPriority = AllowFreeThreshold() < MIN_PRIORITY ? MIN_PRIORITY : AllowFreeThreshold();
std::vector<double> vprilist;
for (double bucketBoundary = minTrackedPriority; bucketBoundary <= MAX_PRIORITY; bucketBoundary *= PRI_SPACING) {
vprilist.push_back(bucketBoundary);
}
vprilist.push_back(INF_PRIORITY);
priStats.Initialize(vprilist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Priority");
feeUnlikely = CFeeRate(0);
feeLikely = CFeeRate(INF_FEERATE);
priUnlikely = 0;
priLikely = INF_PRIORITY;
}
bool CBlockPolicyEstimator::isFeeDataPoint(const CFeeRate &fee, double pri)
{
if ((pri < minTrackedPriority && fee >= minTrackedFee) ||
(pri < priUnlikely && fee > feeLikely)) {
return true;
}
return false;
}
bool CBlockPolicyEstimator::isPriDataPoint(const CFeeRate &fee, double pri)
{
if ((fee < minTrackedFee && pri >= minTrackedPriority) ||
(fee < feeUnlikely && pri > priLikely)) {
return true;
}
return false;
feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
}
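The constructor above spaces bucket boundaries multiplicatively rather than linearly. A self-contained sketch using the constants from the policy/fees.h hunk further down (the real code starts from minTrackedFee, the larger of the min relay feerate and MIN_FEERATE):

#include <cstdio>
#include <vector>

int main()
{
    const double MIN_FEERATE = 10, MAX_FEERATE = 1e7, FEE_SPACING = 1.1;
    const double INF_FEERATE = 2100000000000000.0; // MAX_MONEY as the catch-all bound
    std::vector<double> vfeelist;
    for (double b = MIN_FEERATE; b <= MAX_FEERATE; b *= FEE_SPACING)
        vfeelist.push_back(b); // each boundary 10% above the previous one
    vfeelist.push_back(INF_FEERATE);
    // ~146 buckets give equal relative resolution across 10 .. 1e7 sat/kB
    std::printf("%zu buckets, last finite bound %g\n",
                vfeelist.size(), vfeelist[vfeelist.size() - 2]);
}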
void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate)
{
unsigned int txHeight = entry.GetHeight();
uint256 hash = entry.GetTx().GetHash();
if (mapMemPoolTxs[hash].stats != NULL) {
if (mapMemPoolTxs.count(hash)) {
LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
hash.ToString().c_str());
return;
@@ -371,30 +336,11 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
return;
}
// Fees are stored and reported as BTC-per-kb:
// Feerates are stored and reported as BTC-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
// Want the priority of the tx at confirmation. However we don't know
// what that will be and its too hard to continue updating it
// so use starting priority as a proxy
double curPri = entry.GetPriority(txHeight);
mapMemPoolTxs[hash].blockHeight = txHeight;
LogPrint("estimatefee", "Blockpolicy mempool tx %s ", hash.ToString().substr(0,10));
// Record this as a priority estimate
if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
mapMemPoolTxs[hash].stats = &priStats;
mapMemPoolTxs[hash].bucketIndex = priStats.NewTx(txHeight, curPri);
}
// Record this as a fee estimate
else if (isFeeDataPoint(feeRate, curPri)) {
mapMemPoolTxs[hash].stats = &feeStats;
mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
}
else {
LogPrint("estimatefee", "not adding");
}
LogPrint("estimatefee", "\n");
mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
}
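The CFeeRate constructor used above normalizes fee and size to satoshis per 1000 bytes. A sketch of that arithmetic (FeePerK is a hypothetical stand-in, not the real class):

#include <cstdint>
#include <cstdio>

// Stand-in for CFeeRate(nFeePaid, nBytes).GetFeePerK()
int64_t FeePerK(int64_t nFeePaid, size_t nBytes)
{
    return nBytes > 0 ? nFeePaid * 1000 / (int64_t)nBytes : 0;
}

int main()
{
    std::printf("%lld\n", (long long)FeePerK(5000, 250)); // 20000 sat/kB
}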
void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry)
@@ -417,21 +363,10 @@ void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
return;
}
// Fees are stored and reported as BTC-per-kb:
// Feerates are stored and reported as BTC-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
// Want the priority of the tx at confirmation. The priority when it
// entered the mempool could easily be very small and change quickly
double curPri = entry.GetPriority(nBlockHeight);
// Record this as a priority estimate
if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
priStats.Record(blocksToConfirm, curPri);
}
// Record this as a fee estimate
else if (isFeeDataPoint(feeRate, curPri)) {
feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
}
feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
}
void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
@@ -452,41 +387,15 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
if (!fCurrentEstimate)
return;
// Update the dynamic cutoffs
// a fee/priority is "likely" the reason your tx was included in a block if >85% of such tx's
// were confirmed in 2 blocks and is "unlikely" if <50% were confirmed in 10 blocks
LogPrint("estimatefee", "Blockpolicy recalculating dynamic cutoffs:\n");
priLikely = priStats.EstimateMedianVal(2, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBlockHeight);
if (priLikely == -1)
priLikely = INF_PRIORITY;
double feeLikelyEst = feeStats.EstimateMedianVal(2, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBlockHeight);
if (feeLikelyEst == -1)
feeLikely = CFeeRate(INF_FEERATE);
else
feeLikely = CFeeRate(feeLikelyEst);
priUnlikely = priStats.EstimateMedianVal(10, SUFFICIENT_PRITXS, UNLIKELY_PCT, false, nBlockHeight);
if (priUnlikely == -1)
priUnlikely = 0;
double feeUnlikelyEst = feeStats.EstimateMedianVal(10, SUFFICIENT_FEETXS, UNLIKELY_PCT, false, nBlockHeight);
if (feeUnlikelyEst == -1)
feeUnlikely = CFeeRate(0);
else
feeUnlikely = CFeeRate(feeUnlikelyEst);
// Clear the current block states
// Clear the current block state
feeStats.ClearCurrent(nBlockHeight);
priStats.ClearCurrent(nBlockHeight);
// Repopulate the current block states
for (unsigned int i = 0; i < entries.size(); i++)
processBlockTx(nBlockHeight, entries[i]);
// Update all exponential averages with the current block states
// Update all exponential averages with the current block state
feeStats.UpdateMovingAverages();
priStats.UpdateMovingAverages();
LogPrint("estimatefee", "Blockpolicy after updating estimates for %u confirmed entries, new mempool map size %u\n",
entries.size(), mapMemPoolTxs.size());
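processBlock above runs a fixed per-block cycle: clear the current-block counters, re-record every transaction the block confirmed, then fold the block into the decaying history. The fold is a plain exponential moving average; a simplified sketch of UpdateMovingAverages() (assumed flat vector layout):

#include <cstdio>
#include <vector>

void UpdateMovingAverages(std::vector<double>& avg,
                          const std::vector<double>& curBlock,
                          double decay) // 0.998 == DEFAULT_DECAY
{
    for (size_t i = 0; i < avg.size(); i++)
        avg[i] = avg[i] * decay + curBlock[i]; // old data fades but never fully drops out
}

int main()
{
    std::vector<double> avg = {100.0}, cur = {4.0};
    UpdateMovingAverages(avg, cur, 0.998);
    std::printf("%g\n", avg[0]); // 100*0.998 + 4 = 103.8
}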
@@ -522,7 +431,7 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget - 1;
// If mempool is limiting txs , return at least the min fee from the mempool
// If mempool is limiting txs, return at least the min feerate from the mempool
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
if (minPoolFee > 0 && minPoolFee > median)
return CFeeRate(minPoolFee);
@@ -535,51 +444,38 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
double CBlockPolicyEstimator::estimatePriority(int confTarget)
{
// Return failure if trying to analyze a target we're not tracking
if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
return -1;
return priStats.EstimateMedianVal(confTarget, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
return -1;
}
double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool)
{
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget;
// Return failure if trying to analyze a target we're not tracking
if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
return -1;
// If mempool is limiting txs, no priority txs are allowed
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
if (minPoolFee > 0)
return INF_PRIORITY;
double median = -1;
while (median < 0 && (unsigned int)confTarget <= priStats.GetMaxConfirms()) {
median = priStats.EstimateMedianVal(confTarget++, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
}
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget - 1;
return median;
return -1;
}
void CBlockPolicyEstimator::Write(CAutoFile& fileout)
{
fileout << nBestSeenHeight;
feeStats.Write(fileout);
priStats.Write(fileout);
}
void CBlockPolicyEstimator::Read(CAutoFile& filein)
void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion)
{
int nFileBestSeenHeight;
filein >> nFileBestSeenHeight;
feeStats.Read(filein);
priStats.Read(filein);
nBestSeenHeight = nFileBestSeenHeight;
if (nFileVersion < 139900) {
TxConfirmStats priStats;
priStats.Read(filein);
}
}
FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee)

src/policy/fees.h

@@ -19,60 +19,50 @@ class CTxMemPoolEntry;
class CTxMemPool;
/** \class CBlockPolicyEstimator
* The BlockPolicyEstimator is used for estimating the fee or priority needed
* The BlockPolicyEstimator is used for estimating the feerate needed
* for a transaction to be included in a block within a certain number of
* blocks.
*
* At a high level the algorithm works by grouping transactions into buckets
* based on having similar priorities or fees and then tracking how long it
* based on having similar feerates and then tracking how long it
* takes transactions in the various buckets to be mined. It operates under
* the assumption that in general transactions of higher fee/priority will be
* included in blocks before transactions of lower fee/priority. So for
* example if you wanted to know what fee you should put on a transaction to
* the assumption that in general transactions of higher feerate will be
* included in blocks before transactions of lower feerate. So for
* example if you wanted to know what feerate you should put on a transaction to
* be included in a block within the next 5 blocks, you would start by looking
* at the bucket with the highest fee transactions and verifying that a
* at the bucket with the highest feerate transactions and verifying that a
* sufficiently high percentage of them were confirmed within 5 blocks and
* then you would look at the next highest fee bucket, and so on, stopping at
* the last bucket to pass the test. The average fee of transactions in this
* bucket will give you an indication of the lowest fee you can put on a
* then you would look at the next highest feerate bucket, and so on, stopping at
* the last bucket to pass the test. The average feerate of transactions in this
* bucket will give you an indication of the lowest feerate you can put on a
* transaction and still have a sufficiently high chance of being confirmed
* within your desired 5 blocks.
*
* When a transaction enters the mempool or is included within a block we
* decide whether it can be used as a data point for fee estimation, priority
* estimation or neither. If the value of exactly one of those properties was
* below the required minimum it can be used to estimate the other. In
* addition, if a priori our estimation code would indicate that the
* transaction would be much more quickly included in a block because of one
* of the properties compared to the other, we can also decide to use it as
* an estimate for that property.
*
* Here is a brief description of the implementation for fee estimation.
* When a transaction that counts for fee estimation enters the mempool, we
* Here is a brief description of the implementation:
* When a transaction enters the mempool, we
* track the height of the block chain at entry. Whenever a block comes in,
* we count the number of transactions in each bucket and the total amount of fee
* we count the number of transactions in each bucket and the total feerate
* paid in each bucket. Then we calculate how many blocks Y it took each
* transaction to be mined and we track an array of counters in each bucket
* for how long it took transactions to get confirmed from 1 to a max of 25
* and we increment all the counters from Y up to 25. This is because for any
* number Z>=Y the transaction was successfully mined within Z blocks. We
* want to save a history of this information, so at any time we have a
* counter of the total number of transactions that happened in a given fee
* counter of the total number of transactions that happened in a given feerate
* bucket and the total number that were confirmed in each number 1-25 blocks
* or less for any bucket. We save this history by keeping an exponentially
* decaying moving average of each one of these stats. Furthermore we also
* keep track of the number of unmined (in mempool) transactions in each bucket
* and for how many blocks they have been outstanding and use that to increase
* the number of transactions we've seen in that fee bucket when calculating
* the number of transactions we've seen in that feerate bucket when calculating
* an estimate for any number of confirmations below the number of blocks
* they've been outstanding.
*/
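The counter-incrementing step described above ("for any number Z>=Y the transaction was successfully mined within Z blocks") is easiest to see in code. A simplified sketch of Record(), with an assumed curBlockConf[target-1][bucket] layout:

#include <cstdio>
#include <vector>

// A tx confirmed in Y blocks also counts as confirmed within Z blocks for all Z >= Y
void Record(std::vector<std::vector<int>>& curBlockConf, int blocksToConfirm, int bucket)
{
    if (blocksToConfirm < 1) return; // 1-based, as the header's @warning says
    for (size_t z = blocksToConfirm - 1; z < curBlockConf.size(); z++)
        curBlockConf[z][bucket]++;
}

int main()
{
    std::vector<std::vector<int>> conf(25, std::vector<int>(1, 0));
    Record(conf, 3, 0); // confirmed in 3 blocks
    std::printf("%d %d %d\n", conf[1][0], conf[2][0], conf[24][0]); // 0 1 1
}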
/**
* We will instantiate two instances of this class, one to track transactions
* that were included in a block due to fee, and one for tx's included due to
* priority. We will lump transactions into a bucket according to their approximate
* fee or priority and then track how long it took for those txs to be included in a block
* We will instantiate a single instance of this class to track transactions that were
* included in a block. We will lump transactions into a bucket according to their
* approximate feerate and then track how long it took for those txs to be included in a block
*
* The tracking of unconfirmed (mempool) transactions is completely independent of the
* historical tracking of transactions that have been confirmed in a block.
@@ -80,7 +70,7 @@ class CTxMemPool;
class TxConfirmStats
{
private:
//Define the buckets we will group transactions into (both fee buckets and priority buckets)
//Define the buckets we will group transactions into
std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive)
std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
@@ -97,16 +87,15 @@
// and calculate the totals for the current block to update the moving averages
std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X]
// Sum the total priority/fee of all tx's in each bucket
// Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std::vector<double> avg;
// and calculate the total for the current block to update the moving average
std::vector<double> curBlockVal;
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
// Combine the total value with the tx counts to calculate the avg fee/priority per bucket
// Combine the total value with the tx counts to calculate the avg feerate per bucket
std::string dataTypeString;
double decay;
// Mempool counts of outstanding transactions
@@ -123,9 +112,8 @@
* @param defaultBuckets contains the upper limits for the bucket boundaries
* @param maxConfirms max number of confirms to track
* @param decay how much to decay the historical moving average per block
* @param dataTypeString for logging purposes
*/
void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay, std::string dataTypeString);
void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay);
/** Clear the state of the curBlock variables to start counting for the new block */
void ClearCurrent(unsigned int nBlockHeight);
@@ -133,7 +121,7 @@
/**
* Record a new transaction data point in the current block stats
* @param blocksToConfirm the number of blocks it took this transaction to confirm
* @param val either the fee or the priority when entered of the transaction
* @param val the feerate of the transaction
* @warning blocksToConfirm is 1-based and has to be >= 1
*/
void Record(int blocksToConfirm, double val);
@@ -150,14 +138,14 @@
void UpdateMovingAverages();
/**
* Calculate a fee or priority estimate. Find the lowest value bucket (or range of buckets
* Calculate a feerate estimate. Find the lowest value bucket (or range of buckets
* to make sure we have enough data points) whose transactions still have sufficient likelihood
* of being confirmed within the target number of confirmations
* @param confTarget target number of confirmations
* @param sufficientTxVal required average number of transactions per block in a bucket range
* @param minSuccess the success probability we require
* @param requireGreater return the lowest fee/pri such that all higher values pass minSuccess OR
* return the highest fee/pri such that all lower values fail minSuccess
* @param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
* return the highest feerate such that all lower values fail minSuccess
* @param nBlockHeight the current block height
*/
double EstimateMedianVal(int confTarget, double sufficientTxVal,
@@ -184,35 +172,27 @@ static const unsigned int MAX_BLOCK_CONFIRMS = 25;
/** Decay of .998 is a half-life of 346 blocks or about 2.4 days */
static const double DEFAULT_DECAY = .998;
/** Require greater than 95% of X fee transactions to be confirmed within Y blocks for X to be big enough */
/** Require greater than 95% of X feerate transactions to be confirmed within Y blocks for X to be big enough */
static const double MIN_SUCCESS_PCT = .95;
static const double UNLIKELY_PCT = .5;
/** Require an avg of 1 tx in the combined fee bucket per block to have stat significance */
/** Require an avg of 1 tx in the combined feerate bucket per block to have stat significance */
static const double SUFFICIENT_FEETXS = 1;
/** Require only an avg of 1 tx every 5 blocks in the combined pri bucket (way less pri txs) */
static const double SUFFICIENT_PRITXS = .2;
// Minimum and Maximum values for tracking fees and priorities
// Minimum and Maximum values for tracking feerates
static const double MIN_FEERATE = 10;
static const double MAX_FEERATE = 1e7;
static const double INF_FEERATE = MAX_MONEY;
static const double MIN_PRIORITY = 10;
static const double MAX_PRIORITY = 1e16;
static const double INF_PRIORITY = 1e9 * MAX_MONEY;
// We have to lump transactions into buckets based on fee or priority, but we want to be able
// to give accurate estimates over a large range of potential fees and priorities
// We have to lump transactions into buckets based on feerate, but we want to be able
// to give accurate estimates over a large range of potential feerates
// Therefore it makes sense to exponentially space the buckets
/** Spacing of FeeRate buckets */
static const double FEE_SPACING = 1.1;
/** Spacing of Priority buckets */
static const double PRI_SPACING = 2;
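Two quick arithmetic checks on the constants above (standalone; nothing from the codebase):

#include <cmath>
#include <cstdio>

int main()
{
    // Half-life of DEFAULT_DECAY: 0.998^n = 0.5 => n = ln(0.5)/ln(0.998),
    // about 346 blocks, or roughly 2.4 days at 144 blocks per day
    double halfLife = std::log(0.5) / std::log(0.998);
    // Finite boundaries implied by FEE_SPACING: 10 * 1.1^n <= 1e7 => n <= ln(1e6)/ln(1.1)
    double nBounds = std::log(1e7 / 10.0) / std::log(1.1); // about 145
    std::printf("half-life %.1f blocks, %.1f boundaries\n", halfLife, nBounds);
}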
/**
* We want to be able to estimate fees or priorities that are needed on tx's to be included in
* We want to be able to estimate feerates that are needed on tx's to be included in
* a certain number of blocks. Every time a block is added to the best chain, this class records
* stats on the transactions included in that block
*/
@@ -235,27 +215,26 @@
/** Remove a transaction from the mempool tracking stats*/
void removeTx(uint256 hash);
/** Is this transaction likely included in a block because of its fee?*/
bool isFeeDataPoint(const CFeeRate &fee, double pri);
/** Is this transaction likely included in a block because of its priority?*/
bool isPriDataPoint(const CFeeRate &fee, double pri);
/** Return a fee estimate */
/** Return a feerate estimate */
CFeeRate estimateFee(int confTarget);
/** Estimate fee rate needed to get be included in a block within
* Estimate feerate needed to be included in a block within
* confTarget blocks. If no answer can be given at confTarget, return an
* estimate at the lowest target where one can be given.
*/
CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool);
/** Return a priority estimate */
/** Return a priority estimate.
* DEPRECATED
* Returns -1
*/
double estimatePriority(int confTarget);
/** Estimate priority needed to be included in a block within
* confTarget blocks. If no answer can be given at confTarget, return an
* estimate at the lowest target where one can be given.
* confTarget blocks.
* DEPRECATED
* Returns -1 unless the mempool is currently limited, in which case it returns INF_PRIORITY
* answerFoundAtTarget is set to confTarget
*/
double estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool);
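The fallback described in the estimateSmartFee comment above ("an estimate at the lowest target where one can be given") is the same walk the removed estimateSmartPriority used in the fees.cpp hunk earlier. A generic sketch, with EstimateAt as a hypothetical stand-in for feeStats.EstimateMedianVal():

#include <cstdio>

// Raise the target until some estimate succeeds, and report where it succeeded
double SmartEstimate(int confTarget, int maxConfirms, int* answerFoundAtTarget,
                     double (*EstimateAt)(int))
{
    double median = -1;
    while (median < 0 && confTarget <= maxConfirms)
        median = EstimateAt(confTarget++);
    if (answerFoundAtTarget)
        *answerFoundAtTarget = confTarget - 1;
    return median;
}

int main()
{
    int found;
    double r = SmartEstimate(1, 25, &found,
                             [](int t) { return t < 4 ? -1.0 : 1234.0; });
    std::printf("%g at target %d\n", r, found); // 1234 at target 4
}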
@@ -263,29 +242,23 @@
void Write(CAutoFile& fileout);
/** Read estimation data from a file */
void Read(CAutoFile& filein);
void Read(CAutoFile& filein, int nFileVersion);
private:
CFeeRate minTrackedFee; //!< Passed to constructor to avoid dependency on main
double minTrackedPriority; //!< Set to AllowFreeThreshold
unsigned int nBestSeenHeight;
struct TxStatsInfo
{
TxConfirmStats *stats;
unsigned int blockHeight;
unsigned int bucketIndex;
TxStatsInfo() : stats(NULL), blockHeight(0), bucketIndex(0) {}
TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
};
// map of txids to information about that transaction
std::map<uint256, TxStatsInfo> mapMemPoolTxs;
/** Classes to track historical data on transaction confirmations */
TxConfirmStats feeStats, priStats;
/** Breakpoints to help determine whether a transaction was confirmed by priority or Fee */
CFeeRate feeLikely, feeUnlikely;
double priLikely, priUnlikely;
TxConfirmStats feeStats;
};
class FeeFilterRounder

src/rpc/mining.cpp

@@ -810,7 +810,7 @@ UniValue estimatepriority(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw runtime_error(
"estimatepriority nblocks\n"
"\nEstimates the approximate priority a zero-fee transaction needs to begin\n"
"\nDEPRECATED. Estimates the approximate priority a zero-fee transaction needs to begin\n"
"confirmation within nblocks blocks.\n"
"\nArguments:\n"
"1. nblocks (numeric)\n"
@@ -873,7 +873,7 @@ UniValue estimatesmartpriority(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw runtime_error(
"estimatesmartpriority nblocks\n"
"\nWARNING: This interface is unstable and may disappear or change!\n"
"\nDEPRECATED. WARNING: This interface is unstable and may disappear or change!\n"
"\nEstimates the approximate priority a zero-fee transaction needs to begin\n"
"confirmation within nblocks blocks if possible and return the number of blocks\n"
"for which the estimate is valid.\n"

src/test/policyestimator_tests.cpp

@@ -19,26 +19,18 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
CTxMemPool mpool(CFeeRate(1000));
TestMemPoolEntryHelper entry;
CAmount basefee(2000);
double basepri = 1e6;
CAmount deltaFee(100);
double deltaPri=5e5;
std::vector<CAmount> feeV[2];
std::vector<double> priV[2];
std::vector<CAmount> feeV;
// Populate vectors of increasing fees or priorities
// Populate vectors of increasing fees
for (int j = 0; j < 10; j++) {
//V[0] is for fee transactions
feeV[0].push_back(basefee * (j+1));
priV[0].push_back(0);
//V[1] is for priority transactions
feeV[1].push_back(CAmount(0));
priV[1].push_back(basepri * pow(10, j+1));
feeV.push_back(basefee * (j+1));
}
// Store the hashes of transactions that have been
// added to the mempool by their associate fee/pri
// added to the mempool by their associated fee
// txHashes[j] is populated with transactions of
// fee = basefee * (j+1) OR pri = 10^6 * 10^(j+1)
// fee = basefee * (j+1)
std::vector<uint256> txHashes[10];
// Create a transaction template
@@ -60,19 +52,19 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// At a decay .998 and 4 fee transactions per block
// This makes the tx count about 1.33 per bucket, above the 1 threshold
while (blocknum < 200) {
for (int j = 0; j < 10; j++) { // For each fee/pri multiple
for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
for (int j = 0; j < 10; j++) { // For each fee
for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k; // make transaction unique
uint256 hash = tx.GetHash();
mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
txHashes[j].push_back(hash);
}
}
//Create blocks where higher fee/pri txs are included more often
//Create blocks where higher fee txs are included more often
for (int h = 0; h <= blocknum%10; h++) {
// 10/10 blocks add highest fee/pri transactions
// 10/10 blocks add highest fee transactions
// 9/10 blocks add 2nd highest and so on until ...
// 1/10 blocks add lowest fee/pri transactions
// 1/10 blocks add lowest fee transactions
while (txHashes[9-h].size()) {
std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[9-h].back());
if (ptx)
@@ -100,7 +92,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
}
std::vector<CAmount> origFeeEst;
std::vector<double> origPriEst;
// Highest feerate is 10*baseRate and gets in all blocks,
// second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%,
// third highest feerate is 8*base rate, and gets in 8/10 blocks = 80%,
@@ -109,16 +100,12 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// so estimateFee(2) should return 9*baseRate etc...
for (int i = 1; i < 10;i++) {
origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK());
origPriEst.push_back(mpool.estimatePriority(i));
if (i > 1) { // Fee estimates should be monotonically decreasing
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
BOOST_CHECK(origPriEst[i-1] <= origPriEst[i-2]);
}
int mult = 11-i;
BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
BOOST_CHECK(origPriEst[i-1] < pow(10,mult) * basepri + deltaPri);
BOOST_CHECK(origPriEst[i-1] > pow(10,mult) * basepri - deltaPri);
}
// Mine 50 more blocks with no transactions happening, estimates shouldn't change
@@ -129,19 +116,17 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
for (int i = 1; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee);
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] + deltaPri);
BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
}
// Mine 15 more blocks with lots of transactions happening and not getting mined
// Estimates should go up
while (blocknum < 265) {
for (int j = 0; j < 10; j++) { // For each fee/pri multiple
for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
for (int j = 0; j < 10; j++) { // For each fee multiple
for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
uint256 hash = tx.GetHash();
mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
txHashes[j].push_back(hash);
}
}
@@ -152,8 +137,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
for (int i = 1; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i) == CFeeRate(0) || mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
BOOST_CHECK(mpool.estimateSmartFee(i, &answerFound).GetFeePerK() > origFeeEst[answerFound-1] - deltaFee);
BOOST_CHECK(mpool.estimatePriority(i) == -1 || mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
BOOST_CHECK(mpool.estimateSmartPriority(i, &answerFound) > origPriEst[answerFound-1] - deltaPri);
}
// Mine all those transactions
@@ -170,20 +153,20 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
block.clear();
for (int i = 1; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
}
// Mine 200 more blocks where everything is mined every block
// Estimates should be below original estimates
while (blocknum < 465) {
for (int j = 0; j < 10; j++) { // For each fee/pri multiple
for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
for (int j = 0; j < 10; j++) { // For each fee multiple
for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
uint256 hash = tx.GetHash();
mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
std::shared_ptr<const CTransaction> ptx = mpool.get(hash);
if (ptx)
block.push_back(*ptx);
}
}
mpool.removeForBlock(block, ++blocknum);
@@ -191,15 +174,14 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
}
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] - deltaPri);
}
// Test that if the mempool is limited, estimateSmartFee won't return a value below the mempool min fee
// and that estimateSmartPriority returns essentially an infinite value
mpool.addUnchecked(tx.GetHash(), entry.Fee(feeV[0][5]).Time(GetTime()).Priority(priV[1][5]).Height(blocknum).FromTx(tx, &mpool));
// evict that transaction which should set a mempool min fee of minRelayTxFee + feeV[0][5]
mpool.addUnchecked(tx.GetHash(), entry.Fee(feeV[5]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
// evict that transaction which should set a mempool min fee of minRelayTxFee + feeV[5]
mpool.TrimToSize(1);
BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[0][5]);
BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[5]);
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateSmartFee(i).GetFeePerK() >= mpool.estimateFee(i).GetFeePerK());
BOOST_CHECK(mpool.estimateSmartFee(i).GetFeePerK() >= mpool.GetMinFee(1).GetFeePerK());

src/txmempool.cpp

@@ -895,7 +895,7 @@ CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const
{
try {
LOCK(cs);
fileout << 109900; // version required to read: 0.10.99 or later
fileout << 139900; // version required to read: 0.13.99 or later
fileout << CLIENT_VERSION; // version that wrote the file
minerPolicyEstimator->Write(fileout);
}
@@ -914,9 +914,8 @@ CTxMemPool::ReadFeeEstimates(CAutoFile& filein)
filein >> nVersionRequired >> nVersionThatWrote;
if (nVersionRequired > CLIENT_VERSION)
return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired);
LOCK(cs);
minerPolicyEstimator->Read(filein);
minerPolicyEstimator->Read(filein, nVersionThatWrote);
}
catch (const std::exception&) {
LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)\n");