Merge #7730: Remove priority estimation

0bd581a add release notes for removal of priority estimation (Alex Morcos)
b2322e0 Remove priority estimation (Alex Morcos)

(cherry picked from commit bitcoin/bitcoin@3c03dc2cfc)

Zcash:
* MAX_PRIORITY is still needed because it's used for the priority of
  shielded transactions.
* Changes relating to "smart priority" are omitted since we do not have
  that.
* Set the minimum client version required to read `fee_estimates.dat` to
  FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION == 5050000. We immediately start
  writing at least that version and accepting it for reading (see the
  sketch after this list).
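
A rough, self-contained sketch of that versioning rule as it appears in
`CTxMemPool::WriteFeeEstimates`/`ReadFeeEstimates` in the diff below (stand-in
constants and a toy header struct rather than the real `CAutoFile`
serialization; the `CLIENT_VERSION` value here is hypothetical):

    #include <algorithm>
    #include <cstdio>

    // Stand-in constants; the first value is the one introduced by this change.
    static const int FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION = 5050000;
    static const int CLIENT_VERSION = 5040050; // hypothetical, still below the new floor

    // Version pair stamped at the front of fee_estimates.dat.
    struct FileHeader { int nVersionRequired; int nVersionThatWrote; };

    FileHeader WriteHeader() {
        return FileHeader{
            FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION,                          // version required to read
            std::max(FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION, CLIENT_VERSION) // version that wrote the file
        };
    }

    // The reader accepts the file as long as the required version is not newer
    // than what this client (or the new format floor) understands.
    bool CanRead(const FileHeader& h) {
        return h.nVersionRequired <= std::max(FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION, CLIENT_VERSION);
    }

    int main() {
        FileHeader h = WriteHeader();
        std::printf("required=%d wrote=%d readable=%d\n",
                    h.nVersionRequired, h.nVersionThatWrote, CanRead(h));
        return 0;
    }

This is what "immediately" means above: even while CLIENT_VERSION is still below
5050000, the node already writes files stamped with the new format version and
accepts them when reading.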

Signed-off-by: Daira Emma Hopwood <daira@jacaranda.org>
Author:    Wladimir J. van der Laan, 2016-11-07 13:23:20 +01:00
Committer: Daira Emma Hopwood
Parent:    16f90b06e8
Commit:    12291b2e41
7 changed files with 111 additions and 235 deletions


@@ -29,6 +29,16 @@ RPC Changes
   is unable to construct the transaction without selecting funds from the transparent
   pool, so the impact of this change is that for such transactions, the user must specify
   `AllowFullyTransparent`.
+- The `estimatepriority` RPC call now always returns -1.
+
+Removal of Priority Estimation
+------------------------------
+
+- Estimation of "priority" needed for a transaction to be included within a target
+  number of blocks has been removed. The `estimatepriority` RPC call now always
+  returns -1. The format for `fee_estimates.dat` has also changed to no longer save
+  these priority estimates. It will automatically be converted to the new format
+  which is not readable by prior versions of the software.
 
 [Deprecations](https://zcash.github.io/zcash/user/deprecation.html)
 --------------


@@ -12,10 +12,9 @@
 #include "util/system.h"
 
 void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets,
-                                unsigned int maxConfirms, double _decay, std::string _dataTypeString)
+                                unsigned int maxConfirms, double _decay)
 {
     decay = _decay;
-    dataTypeString = _dataTypeString;
     buckets.insert(buckets.end(), defaultBuckets.begin(), defaultBuckets.end());
     buckets.push_back(std::numeric_limits<double>::infinity());
@@ -95,10 +94,10 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
     int maxbucketindex = buckets.size() - 1;
 
-    // requireGreater means we are looking for the lowest fee/priority such that all higher
-    // values pass, so we start at maxbucketindex (highest fee) and look at succesively
+    // requireGreater means we are looking for the lowest feerate such that all higher
+    // values pass, so we start at maxbucketindex (highest feerate) and look at successively
     // smaller buckets until we reach failure. Otherwise, we are looking for the highest
-    // fee/priority such that all lower values fail, and we go in the opposite direction.
+    // feerate such that all lower values fail, and we go in the opposite direction.
     unsigned int startbucket = requireGreater ? maxbucketindex : 0;
     int step = requireGreater ? -1 : 1;
@@ -115,7 +114,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
     bool foundAnswer = false;
     unsigned int bins = unconfTxs.size();
 
-    // Start counting from highest(default) or lowest fee/pri transactions
+    // Start counting from highest(default) or lowest feerate transactions
     for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
         curFarBucket = bucket;
         nConf += confAvg[confTarget - 1][bucket];
@@ -153,8 +152,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
     double median = -1;
     double txSum = 0;
 
-    // Calculate the "average" fee of the best bucket range that met success conditions
-    // Find the bucket with the median transaction and then report the average fee from that bucket
+    // Calculate the "average" feerate of the best bucket range that met success conditions
+    // Find the bucket with the median transaction and then report the average feerate from that bucket
     // This is a compromise between finding the median which we can't since we don't save all tx's
     // and reporting the average which is less accurate
     unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
@@ -174,8 +173,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
         }
     }
 
-    LogPrint("estimatefee", "%3d: For conf success %s %4.2f need %s %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
-             confTarget, requireGreater ? ">" : "<", successBreakPoint, dataTypeString,
+    LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
+             confTarget, requireGreater ? ">" : "<", successBreakPoint,
              requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
             100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
@@ -208,10 +207,10 @@ void TxConfirmStats::Read(CAutoFile& filein)
     filein >> fileBuckets;
     numBuckets = fileBuckets.size();
     if (numBuckets <= 1 || numBuckets > 1000)
-        throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 fee/pri buckets");
+        throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
     filein >> fileAvg;
     if (fileAvg.size() != numBuckets)
-        throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri average bucket count");
+        throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
     filein >> fileTxCtAvg;
     if (fileTxCtAvg.size() != numBuckets)
         throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
@@ -221,9 +220,9 @@ void TxConfirmStats::Read(CAutoFile& filein)
         throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
     for (unsigned int i = 0; i < maxConfirms; i++) {
         if (fileConfAvg[i].size() != numBuckets)
-            throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri conf average bucket count");
+            throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
     }
-    // Now that we've processed the entire fee estimate data file and not
+    // Now that we've processed the entire feerate estimate data file and not
     // thrown any errors, we can copy it to our data structures
     decay = fileDecay;
     buckets = fileBuckets;
@@ -250,8 +249,8 @@ void TxConfirmStats::Read(CAutoFile& filein)
     for (unsigned int i = 0; i < buckets.size(); i++)
         bucketMap[buckets[i]] = i;
 
-    LogPrint("estimatefee", "Reading estimates: %u %s buckets counting confirms up to %u blocks\n",
-             numBuckets, dataTypeString, maxConfirms);
+    LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
+             numBuckets, maxConfirms);
 }
 
 unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
@@ -259,7 +258,6 @@ unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
     unsigned int bucketindex = FindBucketIndex(val);
     unsigned int blockIndex = nBlockHeight % unconfTxs.size();
     unconfTxs[blockIndex][bucketindex]++;
-    LogPrint("estimatefee", "adding to %s", dataTypeString);
     return bucketindex;
 }
@@ -299,12 +297,10 @@ void CBlockPolicyEstimator::removeTx(uint256 hash)
                  hash.ToString().c_str());
        return;
    }
-    TxConfirmStats *stats = pos->second.stats;
    unsigned int entryHeight = pos->second.blockHeight;
    unsigned int bucketIndex = pos->second.bucketIndex;
-    if (stats != NULL)
-        stats->removeTx(entryHeight, nBestSeenHeight, bucketIndex);
+    feeStats.removeTx(entryHeight, nBestSeenHeight, bucketIndex);
    mapMemPoolTxs.erase(hash);
 }
@@ -316,44 +312,14 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee)
     for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_FEERATE; bucketBoundary *= FEE_SPACING) {
         vfeelist.push_back(bucketBoundary);
     }
-    feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "FeeRate");
-
-    minTrackedPriority = AllowFreeThreshold() < MIN_PRIORITY ? MIN_PRIORITY : AllowFreeThreshold();
-    std::vector<double> vprilist;
-    for (double bucketBoundary = minTrackedPriority; bucketBoundary <= MAX_PRIORITY; bucketBoundary *= PRI_SPACING) {
-        vprilist.push_back(bucketBoundary);
-    }
-    priStats.Initialize(vprilist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Priority");
-
-    feeUnlikely = CFeeRate(0);
-    feeLikely = CFeeRate(INF_FEERATE);
-    priUnlikely = 0;
-    priLikely = INF_PRIORITY;
-}
-
-bool CBlockPolicyEstimator::isFeeDataPoint(const CFeeRate &fee, double pri)
-{
-    if ((pri < minTrackedPriority && fee >= minTrackedFee) ||
-        (pri < priUnlikely && fee > feeLikely)) {
-        return true;
-    }
-    return false;
-}
-
-bool CBlockPolicyEstimator::isPriDataPoint(const CFeeRate &fee, double pri)
-{
-    if ((fee < minTrackedFee && pri >= minTrackedPriority) ||
-        (fee < feeUnlikely && pri > priLikely)) {
-        return true;
-    }
-    return false;
+    feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
 }
 
 void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate)
 {
     unsigned int txHeight = entry.GetHeight();
     uint256 hash = entry.GetTx().GetHash();
-    if (mapMemPoolTxs[hash].stats != NULL) {
+    if (mapMemPoolTxs.count(hash)) {
         LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
                  hash.ToString().c_str());
         return;
@@ -377,30 +343,11 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate)
         return;
     }
 
-    // Fees are stored and reported as BTC-per-kb:
+    // Feerates are stored and reported as zatoshis per 1000 bytes:
     CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
 
-    // Want the priority of the tx at confirmation. However we don't know
-    // what that will be and its too hard to continue updating it
-    // so use starting priority as a proxy
-    double curPri = entry.GetPriority(txHeight);
     mapMemPoolTxs[hash].blockHeight = txHeight;
-
-    LogPrint("estimatefee", "Blockpolicy mempool tx %s ", hash.ToString().substr(0,10));
-    // Record this as a priority estimate
-    if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
-        mapMemPoolTxs[hash].stats = &priStats;
-        mapMemPoolTxs[hash].bucketIndex = priStats.NewTx(txHeight, curPri);
-    }
-    // Record this as a fee estimate
-    else if (isFeeDataPoint(feeRate, curPri)) {
-        mapMemPoolTxs[hash].stats = &feeStats;
-        mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
-    }
-    else {
-        LogPrint("estimatefee", "not adding");
-    }
-    LogPrint("estimatefee", "\n");
+    mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
 }
 
 void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry)
@@ -423,21 +370,10 @@ void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry)
         return;
     }
 
-    // Fees are stored and reported as BTC-per-kb:
+    // Feerates are stored and reported as zatoshis per 1000 bytes:
     CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
 
-    // Want the priority of the tx at confirmation. The priority when it
-    // entered the mempool could easily be very small and change quickly
-    double curPri = entry.GetPriority(nBlockHeight);
-
-    // Record this as a priority estimate
-    if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
-        priStats.Record(blocksToConfirm, curPri);
-    }
-    // Record this as a fee estimate
-    else if (isFeeDataPoint(feeRate, curPri)) {
-        feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
-    }
+    feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
 }
 
 void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
@@ -458,41 +394,15 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
     if (!fCurrentEstimate)
         return;
 
-    // Update the dynamic cutoffs
-    // a fee/priority is "likely" the reason your tx was included in a block if >85% of such tx's
-    // were confirmed in 2 blocks and is "unlikely" if <50% were confirmed in 10 blocks
-    LogPrint("estimatefee", "Blockpolicy recalculating dynamic cutoffs:\n");
-    priLikely = priStats.EstimateMedianVal(2, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBlockHeight);
-    if (priLikely == -1)
-        priLikely = INF_PRIORITY;
-
-    double feeLikelyEst = feeStats.EstimateMedianVal(2, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBlockHeight);
-    if (feeLikelyEst == -1)
-        feeLikely = CFeeRate(INF_FEERATE);
-    else
-        feeLikely = CFeeRate(feeLikelyEst);
-
-    priUnlikely = priStats.EstimateMedianVal(10, SUFFICIENT_PRITXS, UNLIKELY_PCT, false, nBlockHeight);
-    if (priUnlikely == -1)
-        priUnlikely = 0;
-
-    double feeUnlikelyEst = feeStats.EstimateMedianVal(10, SUFFICIENT_FEETXS, UNLIKELY_PCT, false, nBlockHeight);
-    if (feeUnlikelyEst == -1)
-        feeUnlikely = CFeeRate(0);
-    else
-        feeUnlikely = CFeeRate(feeUnlikelyEst);
-
-    // Clear the current block states
+    // Clear the current block state
     feeStats.ClearCurrent(nBlockHeight);
-    priStats.ClearCurrent(nBlockHeight);
 
     // Repopulate the current block states
     for (unsigned int i = 0; i < entries.size(); i++)
         processBlockTx(nBlockHeight, entries[i]);
 
-    // Update all exponential averages with the current block states
+    // Update all exponential averages with the current block state
     feeStats.UpdateMovingAverages();
-    priStats.UpdateMovingAverages();
 
     LogPrint("estimatefee", "Blockpolicy after updating estimates for %u confirmed entries, new mempool map size %u\n",
              entries.size(), mapMemPoolTxs.size());
@@ -514,25 +424,23 @@ CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget)
 
 double CBlockPolicyEstimator::estimatePriority(int confTarget)
 {
-    // Return failure if trying to analyze a target we're not tracking
-    if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
-        return -1;
-
-    return priStats.EstimateMedianVal(confTarget, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
+    return -1;
 }
 
 void CBlockPolicyEstimator::Write(CAutoFile& fileout)
 {
     fileout << nBestSeenHeight;
     feeStats.Write(fileout);
-    priStats.Write(fileout);
 }
 
-void CBlockPolicyEstimator::Read(CAutoFile& filein)
+void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion)
 {
     int nFileBestSeenHeight;
     filein >> nFileBestSeenHeight;
     feeStats.Read(filein);
-    priStats.Read(filein);
     nBestSeenHeight = nFileBestSeenHeight;
+
+    if (nFileVersion < FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION) {
+        TxConfirmStats priStats;
+        priStats.Read(filein);
+    }
 }
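
The last hunk above is what keeps old `fee_estimates.dat` files loadable: the fee
statistics are read as before, and if the file predates the new format version,
the trailing priority statistics are parsed into a throwaway `TxConfirmStats`
purely so they are consumed and discarded. A toy sketch of that read-and-discard
pattern (standard streams and a made-up single-field stats type, not the zcashd
classes; the older file version number is hypothetical):

    #include <iostream>
    #include <sstream>

    static const int FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION = 5050000;

    // Toy stand-in for TxConfirmStats::Read: just consumes one number.
    struct ToyStats { double decay = 0; void Read(std::istream& in) { in >> decay; } };

    void ReadEstimates(std::istream& filein, int nFileVersion, ToyStats& feeStats) {
        feeStats.Read(filein);                       // fee statistics: always present
        if (nFileVersion < FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION) {
            ToyStats priStats;                       // old files carry priority stats too;
            priStats.Read(filein);                   // read them into a temporary and drop them
        }
    }

    int main() {
        std::istringstream oldFile("0.998 0.99");    // fee decay followed by legacy priority decay
        ToyStats feeStats;
        ReadEstimates(oldFile, /*nFileVersion=*/5040050, feeStats);
        std::cout << feeStats.decay << "\n";         // prints 0.998; the 0.99 was consumed and discarded
        return 0;
    }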


@@ -19,51 +19,42 @@ class CFeeRate;
 class CTxMemPoolEntry;
 
 /** \class CBlockPolicyEstimator
- * The BlockPolicyEstimator is used for estimating the fee or priority needed
+ * The BlockPolicyEstimator is used for estimating the feerate needed
  * for a transaction to be included in a block within a certain number of
  * blocks.
  *
  * At a high level the algorithm works by grouping transactions into buckets
- * based on having similar priorities or fees and then tracking how long it
+ * based on having similar feerates and then tracking how long it
  * takes transactions in the various buckets to be mined. It operates under
- * the assumption that in general transactions of higher fee/priority will be
- * included in blocks before transactions of lower fee/priority. So for
- * example if you wanted to know what fee you should put on a transaction to
+ * the assumption that in general transactions of higher feerate will be
+ * included in blocks before transactions of lower feerate. So for
+ * example if you wanted to know what feerate you should put on a transaction to
  * be included in a block within the next 5 blocks, you would start by looking
- * at the bucket with the highest fee transactions and verifying that a
+ * at the bucket with the highest feerate transactions and verifying that a
  * sufficiently high percentage of them were confirmed within 5 blocks and
- * then you would look at the next highest fee bucket, and so on, stopping at
- * the last bucket to pass the test. The average fee of transactions in this
- * bucket will give you an indication of the lowest fee you can put on a
+ * then you would look at the next highest feerate bucket, and so on, stopping at
+ * the last bucket to pass the test. The average feerate of transactions in this
+ * bucket will give you an indication of the lowest feerate you can put on a
  * transaction and still have a sufficiently high chance of being confirmed
  * within your desired 5 blocks.
  *
- * When a transaction enters the mempool or is included within a block we
- * decide whether it can be used as a data point for fee estimation, priority
- * estimation or neither. If the value of exactly one of those properties was
- * below the required minimum it can be used to estimate the other. In
- * addition, if a priori our estimation code would indicate that the
- * transaction would be much more quickly included in a block because of one
- * of the properties compared to the other, we can also decide to use it as
- * an estimate for that property.
- *
- * Here is a brief description of the implementation for fee estimation.
- * When a transaction that counts for fee estimation enters the mempool, we
+ * Here is a brief description of the implementation:
+ * When a transaction enters the mempool, we
  * track the height of the block chain at entry. Whenever a block comes in,
- * we count the number of transactions in each bucket and the total amount of fee
+ * we count the number of transactions in each bucket and the total amount of feerate
  * paid in each bucket. Then we calculate how many blocks Y it took each
  * transaction to be mined and we track an array of counters in each bucket
  * for how long it to took transactions to get confirmed from 1 to a max of 25
  * and we increment all the counters from Y up to 25. This is because for any
  * number Z>=Y the transaction was successfully mined within Z blocks. We
  * want to save a history of this information, so at any time we have a
- * counter of the total number of transactions that happened in a given fee
+ * counter of the total number of transactions that happened in a given feerate
  * bucket and the total number that were confirmed in each number 1-25 blocks
  * or less for any bucket. We save this history by keeping an exponentially
  * decaying moving average of each one of these stats. Furthermore we also
  * keep track of the number unmined (in mempool) transactions in each bucket
  * and for how many blocks they have been outstanding and use that to increase
- * the number of transactions we've seen in that fee bucket when calculating
+ * the number of transactions we've seen in that feerate bucket when calculating
  * an estimate for any number of confirmations below the number of blocks
  * they've been outstanding.
  */
@@ -72,12 +63,11 @@ class CTxMemPoolEntry;
 static const double DEFAULT_DECAY = .998;
 
 /**
- * We will instantiate two instances of this class, one to track transactions
- * that were included in a block due to fee, and one for txs included due to
- * priority. We will lump transactions into a bucket according to their approximate
- * fee or priority and then track how long it took for those txs to be included
+ * We will instantiate an instance of this class to track transactions that were
+ * included in a block. We will lump transactions into a bucket according to their
+ * approximate feerate and then track how long it took for those txs to be included
  * in a block. There is always a bucket into which any given double value
- * (representing a fee or priority) falls.
+ * (representing a fee) falls.
  *
  * The tracking of unconfirmed (mempool) transactions is completely independent of the
  * historical tracking of transactions that have been confirmed in a block.
@@ -85,7 +75,7 @@ static const double DEFAULT_DECAY = .998;
 class TxConfirmStats
 {
 private:
-    //Define the buckets we will group transactions into (both fee buckets and priority buckets)
+    //Define the buckets we will group transactions into
     std::vector<double> buckets;              // The upper-bound of the range for the bucket (inclusive)
     std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
@@ -102,16 +92,15 @@ private:
     // and calculate the totals for the current block to update the moving averages
     std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X]
 
-    // Sum the total priority/fee of all txs in each bucket
+    // Sum the total feerate of all tx's in each bucket
     // Track the historical moving average of this total over blocks
     std::vector<double> avg;
     // and calculate the total for the current block to update the moving average
     std::vector<double> curBlockVal;
 
     // Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
-    // Combine the total value with the tx counts to calculate the avg fee/priority per bucket
+    // Combine the total value with the tx counts to calculate the avg feerate per bucket
 
-    std::string dataTypeString;
     double decay = DEFAULT_DECAY;
 
     // Mempool counts of outstanding transactions
@@ -133,9 +122,8 @@ public:
      * @param defaultBuckets contains the upper limits for the bucket boundaries
      * @param maxConfirms max number of confirms to track
     * @param decay how much to decay the historical moving average per block
-     * @param dataTypeString for logging purposes
     */
-    void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay, std::string dataTypeString);
+    void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay);
 
     /** Clear the state of the curBlock variables to start counting for the new block */
     void ClearCurrent(unsigned int nBlockHeight);
@@ -143,7 +131,7 @@ public:
     /**
     * Record a new transaction data point in the current block stats
     * @param blocksToConfirm the number of blocks it took this transaction to confirm
-     * @param val either the fee or the priority when entered of the transaction
+     * @param val the feerate of the transaction
     * @warning blocksToConfirm is 1-based and has to be >= 1
     */
     void Record(int blocksToConfirm, double val);
@@ -160,14 +148,14 @@ public:
     void UpdateMovingAverages();
 
     /**
-     * Calculate a fee or priority estimate. Find the lowest value bucket (or range of buckets
+     * Calculate a feerate estimate. Find the lowest value bucket (or range of buckets
     * to make sure we have enough data points) whose transactions still have sufficient likelihood
     * of being confirmed within the target number of confirmations
     * @param confTarget target number of confirmations
     * @param sufficientTxVal required average number of transactions per block in a bucket range
     * @param minSuccess the success probability we require
-     * @param requireGreater return the lowest fee/pri such that all higher values pass minSuccess OR
-     *        return the highest fee/pri such that all lower values fail minSuccess
+     * @param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
+     *        return the highest feerate such that all lower values fail minSuccess
     * @param nBlockHeight the current block height
     */
     double EstimateMedianVal(int confTarget, double sufficientTxVal,
@@ -191,35 +179,30 @@ public:
 /** Track confirm delays up to 25 blocks, can't estimate beyond that */
 static const unsigned int MAX_BLOCK_CONFIRMS = 25;
 
-/** Require greater than 85% of X fee transactions to be confirmed within Y blocks for X to be big enough */
+/** Require greater than 85% of X feerate transactions to be confirmed within Y blocks for X to be big enough */
 static const double MIN_SUCCESS_PCT = .85;
 static const double UNLIKELY_PCT = .5;
 
-/** Require an avg of 1 tx in the combined fee bucket per block to have stat significance */
+/** Require an avg of 1 tx in the combined feerate bucket per block to have stat significance */
 static const double SUFFICIENT_FEETXS = 1;
-/** Require only an avg of 1 tx every 5 blocks in the combined pri bucket (way less pri txs) */
-static const double SUFFICIENT_PRITXS = .2;
 
-// Minimum and Maximum values for tracking fees and priorities
+// Minimum and Maximum values for tracking feerates
 static const double MIN_FEERATE = 10;
 static const double MAX_FEERATE = 1e7;
 static const double INF_FEERATE = MAX_MONEY;
-static const double MIN_PRIORITY = 10;
+// Maximum priority, used for transactions with shielded components
 static const double MAX_PRIORITY = 1e16;
 static const double INF_PRIORITY = 1e9 * MAX_MONEY;
 
-// We have to lump transactions into buckets based on fee or priority, but we want to be able
-// to give accurate estimates over a large range of potential fees and priorities
+// We have to lump transactions into buckets based on feerate, but we want to be able
+// to give accurate estimates over a large range of potential feerates
 // Therefore it makes sense to exponentially space the buckets
 /** Spacing of FeeRate buckets */
 static const double FEE_SPACING = 1.1;
-/** Spacing of Priority buckets */
-static const double PRI_SPACING = 2;
 
 /**
- * We want to be able to estimate fees or priorities that are needed on txs to be included in
+ * We want to be able to estimate feerates that are needed on tx's to be included in
 * a certain number of blocks. Every time a block is added to the best chain, this class records
 * stats on the transactions included in that block
 */
@@ -242,44 +225,35 @@ public:
     /** Remove a transaction from the mempool tracking stats*/
     void removeTx(uint256 hash);
 
-    /** Is this transaction likely included in a block because of its fee?*/
-    bool isFeeDataPoint(const CFeeRate &fee, double pri);
-
-    /** Is this transaction likely included in a block because of its priority?*/
-    bool isPriDataPoint(const CFeeRate &fee, double pri);
-
-    /** Return a fee estimate */
+    /** Return a feerate estimate */
     CFeeRate estimateFee(int confTarget);
 
-    /** Return a priority estimate */
+    /** Return a priority estimate.
+     *  DEPRECATED
+     *  Returns -1
+     */
     double estimatePriority(int confTarget);
 
     /** Write estimation data to a file */
     void Write(CAutoFile& fileout);
 
     /** Read estimation data from a file */
-    void Read(CAutoFile& filein);
+    void Read(CAutoFile& filein, int nFileVersion);
 
 private:
     CFeeRate minTrackedFee; //!< Passed to constructor to avoid dependency on main
-    double minTrackedPriority; //!< Set to AllowFreeThreshold
     unsigned int nBestSeenHeight;
     struct TxStatsInfo
     {
-        TxConfirmStats *stats;
         unsigned int blockHeight;
         unsigned int bucketIndex;
-        TxStatsInfo() : stats(NULL), blockHeight(0), bucketIndex(0) {}
+        TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
     };
 
     // map of txids to information about that transaction
     std::map<uint256, TxStatsInfo> mapMemPoolTxs;
 
     /** Classes to track historical data on transaction confirmations */
-    TxConfirmStats feeStats, priStats;
-
-    /** Breakpoints to help determine whether a transaction was confirmed by priority or Fee */
-    CFeeRate feeLikely, feeUnlikely;
-    double priLikely, priUnlikely;
+    TxConfirmStats feeStats;
 };
 
 #endif // BITCOIN_POLICY_FEES_H
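
To make the estimator description at the top of this header concrete, here is a
compact, self-contained sketch of the bucket-and-decay bookkeeping it describes.
This is illustrative only, not the zcashd implementation: the names are invented,
there is no mempool/unconfirmed tracking and no true median-of-bucket step, while
the 0.998 decay, the 85% success threshold, the 1.1 bucket spacing and the
1-tx-per-block sufficiency come from the constants above:

    #include <cstdio>
    #include <vector>

    // One entry per feerate bucket; exponentially decayed totals.
    struct Bucket {
        double txCount = 0;     // decayed count of txs that landed in this bucket
        double confirmed = 0;   // decayed count of those confirmed within the target
    };

    struct ToyEstimator {
        std::vector<double> upperBound;  // bucket upper bounds, exponentially spaced
        std::vector<Bucket> buckets;
        double decay = 0.998;

        ToyEstimator(double minFee, double maxFee, double spacing = 1.1) {
            for (double b = minFee; b <= maxFee; b *= spacing) {
                upperBound.push_back(b);
                buckets.push_back(Bucket{});
            }
        }
        // Record one confirmed tx: which bucket it fell in and whether it made the target.
        void Record(double feerate, bool confirmedWithinTarget) {
            size_t i = 0;
            while (i + 1 < upperBound.size() && feerate > upperBound[i]) i++;
            buckets[i].txCount += 1;
            if (confirmedWithinTarget) buckets[i].confirmed += 1;
        }
        // Apply the per-block exponential decay to all history.
        void DecayAll() {
            for (Bucket& b : buckets) { b.txCount *= decay; b.confirmed *= decay; }
        }
        // Scan from the highest-feerate bucket down; the last bucket range where at
        // least minSuccess of txs confirmed in time gives the estimate.
        double Estimate(double minSuccess = 0.85, double sufficientTxs = 1.0) const {
            double best = -1;
            double nConf = 0, nTotal = 0;
            for (size_t i = buckets.size(); i-- > 0;) {
                nConf += buckets[i].confirmed;
                nTotal += buckets[i].txCount;
                if (nTotal >= sufficientTxs) {
                    if (nConf / nTotal < minSuccess) break;  // lower buckets start failing
                    best = upperBound[i];                    // this range still passes
                    nConf = nTotal = 0;                      // start a fresh range
                }
            }
            return best;  // -1 if no bucket range had enough data and success
        }
    };

    int main() {
        ToyEstimator est(1000, 20000);
        // Feed 100 blocks: high feerates always confirm in time, low ones never do.
        for (int block = 0; block < 100; block++) {
            for (int j = 1; j <= 10; j++)
                est.Record(2000.0 * j, /*confirmedWithinTarget=*/j >= 4);
            est.DecayAll();
        }
        // Prints roughly the lowest feerate that still confirms reliably (near the 4th fee level).
        std::printf("estimated feerate: %.0f\n", est.Estimate());
        return 0;
    }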


@@ -945,9 +945,8 @@ UniValue estimatepriority(const UniValue& params, bool fHelp)
     if (fHelp || params.size() != 1)
         throw runtime_error(
             "estimatepriority nblocks\n"
-            "\nEstimates the approximate priority\n"
-            "a zero-fee transaction needs to begin confirmation\n"
-            "within nblocks blocks.\n"
+            "\nDEPRECATED. Estimates the approximate priority a zero-fee transaction needs to begin\n"
+            "confirmation within nblocks blocks.\n"
             "\nArguments:\n"
             "1. nblocks (numeric)\n"
             "\nResult:\n"


@@ -19,26 +19,18 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     CTxMemPool mpool(CFeeRate(1000));
     TestMemPoolEntryHelper entry;
     CAmount basefee(2000);
-    double basepri = 1e6;
     CAmount deltaFee(100);
-    double deltaPri=5e5;
-    std::vector<CAmount> feeV[2];
-    std::vector<double> priV[2];
+    std::vector<CAmount> feeV;
 
-    // Populate vectors of increasing fees or priorities
+    // Populate vectors of increasing fees
     for (int j = 0; j < 10; j++) {
-        //V[0] is for fee transactions
-        feeV[0].push_back(basefee * (j+1));
-        priV[0].push_back(0);
-        //V[1] is for priority transactions
-        feeV[1].push_back(CAmount(0));
-        priV[1].push_back(basepri * pow(10, j+1));
+        feeV.push_back(basefee * (j+1));
     }
 
     // Store the hashes of transactions that have been
-    // added to the mempool by their associate fee/pri
+    // added to the mempool by their associate fee
     // txHashes[j] is populated with transactions either of
-    // fee = basefee * (j+1) OR pri = 10^6 * 10^(j+1)
+    // fee = basefee * (j+1)
     std::vector<uint256> txHashes[10];
 
     // Create a transaction template
@@ -61,19 +53,19 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     // At a decay .998 and 4 fee transactions per block
     // This makes the tx count about 1.33 per bucket, above the 1 threshold
     while (blocknum < 200) {
-        for (int j = 0; j < 10; j++) { // For each fee/pri multiple
-            for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+        for (int j = 0; j < 10; j++) { // For each fee
+            for (int k = 0; k < 4; k++) { // add 4 fee txs
                 tx.vin[0].prevout.n = 10000*blocknum+100*j+k; // make transaction unique
                 uint256 hash = tx.GetHash();
-                mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
+                mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
                 txHashes[j].push_back(hash);
             }
         }
-        //Create blocks where higher fee/pri txs are included more often
+        //Create blocks where higher fee txs are included more often
         for (int h = 0; h <= blocknum%10; h++) {
-            // 10/10 blocks add highest fee/pri transactions
+            // 10/10 blocks add highest fee transactions
             // 9/10 blocks add 2nd highest and so on until ...
-            // 1/10 blocks add lowest fee/pri transactions
+            // 1/10 blocks add lowest fee transactions
             while (txHashes[9-h].size()) {
                 std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[9-h].back());
                 if (ptx)
@@ -94,7 +86,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     }
 
     std::vector<CAmount> origFeeEst;
-    std::vector<double> origPriEst;
     // Highest feerate is 10*baseRate and gets in all blocks,
     // second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%,
     // third highest feerate is 8*base rate, and gets in 8/10 blocks = 80%,
@@ -103,15 +94,11 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     // so estimateFee(2) should return 8*baseRate etc...
     for (int i = 1; i < 10;i++) {
         origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK());
-        origPriEst.push_back(mpool.estimatePriority(i));
         if (i > 1) { // Fee estimates should be monotonically decreasing
             BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
-            BOOST_CHECK(origPriEst[i-1] <= origPriEst[i-2]);
         }
         BOOST_CHECK(origFeeEst[i-1] < (10-i)*baseRate.GetFeePerK() + deltaFee);
         BOOST_CHECK(origFeeEst[i-1] > (10-i)*baseRate.GetFeePerK() - deltaFee);
-        BOOST_CHECK(origPriEst[i-1] < pow(10,10-i) * basepri + deltaPri);
-        BOOST_CHECK(origPriEst[i-1] > pow(10,10-i) * basepri - deltaPri);
     }
 
     // Mine 50 more blocks with no transactions happening, estimates shouldn't change
@@ -122,19 +109,17 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     for (int i = 1; i < 10;i++) {
         BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee);
         BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
-        BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] + deltaPri);
-        BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
     }
 
     // Mine 15 more blocks with lots of transactions happening and not getting mined
     // Estimates should go up
     while (blocknum < 265) {
-        for (int j = 0; j < 10; j++) { // For each fee/pri multiple
-            for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+        for (int j = 0; j < 10; j++) { // For each fee multiple
+            for (int k = 0; k < 4; k++) { // add 4 fee txs
                 tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
                 uint256 hash = tx.GetHash();
-                mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
+                mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
                 txHashes[j].push_back(hash);
             }
         }
@@ -143,7 +128,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     for (int i = 1; i < 10;i++) {
         BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
-        BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
     }
 
     // Mine all those transactions
@@ -160,20 +144,20 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     block.clear();
     for (int i = 1; i < 10;i++) {
         BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
-        BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
     }
 
     // Mine 100 more blocks where everything is mined every block
     // Estimates should be below original estimates (not possible for last estimate)
     while (blocknum < 365) {
-        for (int j = 0; j < 10; j++) { // For each fee/pri multiple
-            for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+        for (int j = 0; j < 10; j++) { // For each fee multiple
+            for (int k = 0; k < 4; k++) { // add 4 fee txs
                 tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
                 uint256 hash = tx.GetHash();
-                mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
+                mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
                 std::shared_ptr<const CTransaction> ptx = mpool.get(hash);
                 if (ptx)
                     block.push_back(*ptx);
             }
         }
         mpool.removeForBlock(block, ++blocknum, dummyConflicted);
@@ -181,7 +165,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
     }
 
     for (int i = 1; i < 9; i++) {
         BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
-        BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] - deltaPri);
     }
 }
@@ -191,7 +174,7 @@ BOOST_AUTO_TEST_CASE(TxConfirmStats_FindBucketIndex)
     std::vector<double> buckets {0.0, 3.5, 42.0};
     TxConfirmStats txcs;
-    txcs.Initialize(buckets, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Test");
+    txcs.Initialize(buckets, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
 
     BOOST_CHECK_EQUAL(txcs.FindBucketIndex(-1.0), 0);
     BOOST_CHECK_EQUAL(txcs.FindBucketIndex(0.0), 0);


@@ -1106,8 +1106,8 @@ CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const
 {
     try {
         LOCK(cs);
-        fileout << 109900; // version required to read: 0.10.99 or later
-        fileout << CLIENT_VERSION; // version that wrote the file
+        fileout << FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION; // version required to read
+        fileout << std::max(FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION, CLIENT_VERSION); // version that wrote the file
         minerPolicyEstimator->Write(fileout);
     }
     catch (const std::exception&) {
@@ -1123,11 +1123,10 @@ CTxMemPool::ReadFeeEstimates(CAutoFile& filein)
     try {
         int nVersionRequired, nVersionThatWrote;
         filein >> nVersionRequired >> nVersionThatWrote;
-        if (nVersionRequired > CLIENT_VERSION)
+        if (nVersionRequired > std::max(FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION, CLIENT_VERSION))
             return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired);
 
         LOCK(cs);
-        minerPolicyEstimator->Read(filein);
+        minerPolicyEstimator->Read(filein, nVersionThatWrote);
     }
     catch (const std::exception&) {
         LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)\n");


@@ -44,6 +44,9 @@ inline bool AllowFree(double dPriority)
     return dPriority > AllowFreeThreshold();
 }
 
+/** Version from which we write `fee_estimates.dat` without priority information: 5.5.0-beta1 or later */
+static const int FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION = 5050000;
+
 /** Fake height value used in CCoins to signify they are only in the memory pool (since 0.8) */
 static const unsigned int MEMPOOL_HEIGHT = 0x7FFFFFFF;
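
As an aside on the constant's value: it uses the same packed client-version
scheme as the old `109900` ("0.10.99") marker replaced above, i.e.
major*1000000 + minor*10000 + revision*100 + build. A quick check of that
arithmetic (the mapping of build 0 to the "-beta1" suffix mentioned in the
comment is an assumption about zcashd's version formatting, not something
shown in this diff):

    #include <cstdio>

    int main() {
        const int v = 5050000;             // FEE_ESTIMATES_WITHOUT_PRIORITY_VERSION
        int major    =  v / 1000000;       // 5
        int minor    = (v / 10000) % 100;  // 5
        int revision = (v / 100) % 100;    // 0
        int build    =  v % 100;           // 0 (labelled "beta1" above; assumed mapping)
        std::printf("%d.%d.%d build %d\n", major, minor, revision, build);  // 5.5.0 build 0
        return 0;
    }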