mirror of https://github.com/BTCPrivate/z-nomp.git
Added rewardRecipients configuration, refactored payment processing, moved p2p magic to coin config
This commit is contained in:
parent 80be932651
commit 73668709ce
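For context on the headline change: each rewardRecipients entry maps an address (or hashed public key) to a percentage of every block reward that is taken out before miners are paid. A minimal sketch of that split, with an assumed block reward (the variable names are illustrative, not the portal's internals):

```js
// Illustrative sketch of how rewardRecipients percentages reduce the miners' share.
// `rewardRecipients` mirrors the README example below; `blockRewardSatoshis` is assumed.
var rewardRecipients = {
    "n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5,       // pool op
    "mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5,       // pool co-owner
    "22851477d63a085dbc2398c8430af1c09e7343f6": 0.1  // NOMP donation
};

var blockRewardSatoshis = 5000000000; // e.g. a 50-coin block at 1e8 satoshis per coin

var totalFeePercent = Object.keys(rewardRecipients).reduce(function(sum, addr){
    return sum + rewardRecipients[addr];
}, 0); // 2.1

var minersShare = Math.floor(blockRewardSatoshis * (1 - totalFeePercent / 100));
console.log(totalFeePercent + '% to recipients, ' + minersShare + ' satoshis to miners');
```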
README.md (166 changed lines)
@@ -284,13 +284,22 @@ Here is an example of the required fields:
 {
     "name": "Litecoin",
     "symbol": "ltc",
-    "algorithm": "scrypt", //or "sha256", "scrypt-jane", "scrypt-n", "quark", "x11"
-    "txMessages": false, //or true (not required, defaults to false)
-    "mposDiffMultiplier": 256, //only for x11 coins in mpos mode, set to 256 (optional)
+    "algorithm": "scrypt",
+
+    /* Magic value only required for setting up p2p block notifications. It is found in the daemon
+       source code as the pchMessageStart variable.
+       For example, litecoin mainnet magic: http://git.io/Bi8YFw
+       And for litecoin testnet magic: http://git.io/NXBYJA */
+    "peerMagic": "fbc0b6db", //optional
+    "peerMagicTestnet": "fcc1b7dc", //optional
+
+    //"txMessages": false, //optional - defaults to false
+
+    //"mposDiffMultiplier": 256, //optional - only for x11 coins in mpos mode
 }
 ````

-For additional documentation on how to configure coins *(especially important for scrypt-n and scrypt-jane coins)*
+For additional documentation on how to configure coins and their different algorithms
 see [these instructions](//github.com/zone117x/node-stratum-pool#module-usage).
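The new peerMagic value is the coin network's four-byte message prefix (pchMessageStart in the daemon source); the p2p block-notification feature needs it to frame and recognize peer messages. Illustratively, turning the config string into bytes might look like this (Node of that era; modern code would use Buffer.from):

```js
// Illustrative only: the network magic is a 4-byte prefix on every p2p message.
var peerMagic = 'fbc0b6db'; // litecoin mainnet, from the coin config
var magicBytes = new Buffer(peerMagic, 'hex'); // <Buffer fb c0 b6 db>
console.log(magicBytes.length); // 4
```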
@@ -307,6 +316,17 @@ Description of options:

     "address": "mi4iBXbBsydtcc5yFmsff2zCFVX4XG7qJc", //Address to where block rewards are given

+    /* Block rewards go to the configured pool wallet address to later be paid out to miners,
+       except for percentages that can go to pool operator(s) as pool fees or donations.
+       Addresses or hashed public keys can be used. */
+    "rewardRecipients": {
+        "n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5, //1.5% goes to pool op
+        "mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5, //0.5% goes to a pool co-owner
+
+        //0.1% donation to NOMP to help support development
+        "22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
+    },
+
     "blockRefreshInterval": 1000, //How often to poll RPC daemons for new blocks, in milliseconds

     /* How many milliseconds should have passed before new block transactions will trigger a new
@@ -332,96 +352,68 @@ Description of options:
        miners/pools that deal with scrypt use a guesstimated one that is about 5.86% off from the
        actual one. So here we can set a tolerable threshold for if a share is slightly too low
        due to mining apps using incorrect max diffs and this pool using correct max diffs. */
-    "shareVariancePercent": 10,
+    "shareVariancePercent": 2,

     /* Enable for client IP addresses to be detected when using a load balancer with TCP proxy
        protocol enabled, such as HAProxy with 'send-proxy' param:
        http://haproxy.1wt.eu/download/1.5/doc/configuration.txt */
     "tcpProxyProtocol": false,

-    /* This determines what to do with submitted shares (and stratum worker authentication).
-       You have two options:
-       1) Enable internal and disable mpos = this portal to handle all share payments.
-       2) Enable mpos and disable internal = shares will be inserted into MySQL database
-          for MPOS to process. */
-    "shareProcessing": {
-
-        "internal": {
-            "enabled": true,
-
-            /* When workers connect, to receive payments, their address must be used as the worker
-               name. If this option is true, on worker authentication, their address will be
-               verified via a validateaddress API call to the daemon. Miners with invalid addresses
-               will be rejected. */
-            "validateWorkerAddress": true,
-
-            /* Every this many seconds get submitted blocks from redis, use daemon RPC to check
-               their confirmation status, if confirmed then get shares from redis that contributed
-               to the block and send out payments. */
-            "paymentInterval": 30,
-
-            /* Minimum number of coins that a miner must earn before sending payment. Typically,
-               a higher minimum means fewer transaction fees (you profit more) but miners see
-               payments less frequently (they dislike). Opposite for a lower minimum payment. */
-            "minimumPayment": 0.001,
-
-            /* Minimum number of coins to keep in the pool wallet. It is recommended to deposit
-               at least this many coins into the pool wallet when first starting the pool. */
-            "minimumReserve": 10,
-
-            /* (2% default) What percent fee your pool takes from the block reward. */
-            "feePercent": 0.02,
-
-            /* Name of the daemon account to use when moving coin profit within the daemon wallet. */
-            "feeCollectAccount": "feesCollected",
-
-            /* Your address that receives pool revenue from fees. */
-            "feeReceiveAddress": "LZz44iyF4zLCXJTU8RxztyyJZBntdS6fvv",
-
-            /* How many coins from fee revenue must accumulate on top of the
-               minimum reserve amount in order to trigger withdrawal to the fee address. The higher
-               this threshold, the less of your profit goes to transaction fees. */
-            "feeWithdrawalThreshold": 5,
-
-            /* This daemon is used to send out payments. It MUST be for the daemon that owns the
-               configured 'address' that receives the block rewards, otherwise the daemon will not
-               be able to confirm blocks or send out payments. */
-            "daemon": {
-                "host": "127.0.0.1",
-                "port": 19332,
-                "user": "litecoinrpc",
-                "password": "testnet"
-            },
-
-            /* Redis database used for storing share and block submission data. */
-            "redis": {
-                "host": "127.0.0.1",
-                "port": 6379
-            }
-        },
-
-        /* Enable mpos and shares will be inserted into the share table in a MySQL database. You may
-           also want to use the "emitInvalidBlockHashes" option below if you require it. */
-        "mpos": {
-            "enabled": false,
-            "host": "127.0.0.1", //MySQL db host
-            "port": 3306, //MySQL db port
-            "user": "me", //MySQL db user
-            "password": "mypass", //MySQL db password
-            "database": "ltc", //MySQL db database name
-
-            /* Unregistered workers can automatically be registered (added to database) on stratum
-               worker authentication if this is true. */
-            "autoCreateWorker": false,
-
-            /* For when miners authenticate: set to "password" for both worker name and password to
-               be checked in the database, set to "worker" for only the worker name to be checked, or
-               don't use this option (set to "none") for no auth checks */
-            "stratumAuth": "password"
-        }
-    },
+    /* To receive payments, miners must connect with their address or mining key as their username.
+       This option will only authenticate miners using an address or mining key. */
+    "validateWorkerAddress": true,
+
+    "paymentProcessing": {
+        "enabled": true,
+
+        /* Every this many seconds get submitted blocks from redis, use daemon RPC to check
+           their confirmation status, if confirmed then get shares from redis that contributed
+           to the block and send out payments. */
+        "paymentInterval": 30,
+
+        /* Minimum number of coins that a miner must earn before sending payment. Typically,
+           a higher minimum means fewer transaction fees (you profit more) but miners see
+           payments less frequently (they dislike). Opposite for a lower minimum payment. */
+        "minimumPayment": 0.01,
+
+        /* This daemon is used to send out payments. It MUST be for the daemon that owns the
+           configured 'address' that receives the block rewards, otherwise the daemon will not
+           be able to confirm blocks or send out payments. */
+        "daemon": {
+            "host": "127.0.0.1",
+            "port": 19332,
+            "user": "litecoinrpc",
+            "password": "testnet"
+        }
+    },
+
+    /* Redis database used for storing share and block submission data and payment processing. */
+    "redis": {
+        "host": "127.0.0.1",
+        "port": 6379
+    },
+
+    /* Enable this mode and shares will be inserted into a MySQL database. You may also want
+       to use the "emitInvalidBlockHashes" option below if you require it. The config options
+       "redis" and "paymentProcessing" will be ignored/unused if this is enabled. */
+    "mposMode": {
+        "enabled": false,
+        "host": "127.0.0.1", //MySQL db host
+        "port": 3306, //MySQL db port
+        "user": "me", //MySQL db user
+        "password": "mypass", //MySQL db password
+        "database": "ltc", //MySQL db database name
+
+        /* Checks for a valid password in the database when miners connect. */
+        "checkPassword": true,
+
+        /* Unregistered workers can automatically be registered (added to database) on stratum
+           worker authentication if this is true. */
+        "autoCreateWorker": false
+    },

     /* If a worker is submitting a high threshold of invalid shares we can temporarily ban their IP
        to reduce system/network load. Also useful to fight against flooding attacks. If running
        behind something like HAProxy be sure to enable 'tcpProxyProtocol', otherwise you'll end up
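Put together, an existing pool config migrates from the old nested shareProcessing layout to flat top-level options roughly like this (a sketch based on the README changes above; nested details elided with comments, values illustrative):

```js
// Before this commit: everything lived under shareProcessing.
var oldConfig = {
    shareProcessing: {
        internal: {
            enabled: true,
            validateWorkerAddress: true,
            paymentInterval: 30,
            minimumPayment: 0.001,
            daemon: { /* payment daemon credentials */ },
            redis: { /* share/block storage */ }
        },
        mpos: { enabled: false /* MySQL credentials, stratumAuth, ... */ }
    }
};

// After this commit: flat top-level keys; one redis config is shared by
// share handling and payment processing.
var newConfig = {
    validateWorkerAddress: true,
    paymentProcessing: {
        enabled: true,
        paymentInterval: 30,
        minimumPayment: 0.01,
        daemon: { /* payment daemon credentials */ }
    },
    redis: { host: '127.0.0.1', port: 6379 },
    mposMode: { enabled: false, checkPassword: true /* MySQL credentials, ... */ }
};
```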
@@ -476,8 +468,8 @@ Description of options:

     /* This allows the pool to connect to the daemon as a node peer to receive block updates.
        It may be the most efficient way to get block updates (faster than polling, less
-       intensive than blocknotify script). It requires additional setup: the 'magic' field must
-       be exact (extracted from the coin source code). */
+       intensive than blocknotify script). It requires the additional field "peerMagic" in
+       the coin config. */
     "p2p": {
         "enabled": false,
@@ -490,13 +482,7 @@ Description of options:
         /* If your coin daemon is new enough (i.e. not a shitcoin) then it will support a p2p
            feature that prevents the daemon from spamming our peer node with unnecessary
            transaction data. Assume it's supported but if you have problems try disabling it. */
-        "disableTransactions": true,
-
-        /* Magic value is different for main/testnet and for each coin. It is found in the daemon
-           source code as the pchMessageStart variable.
-           For example, litecoin mainnet magic: http://git.io/Bi8YFw
-           And for litecoin testnet magic: http://git.io/NXBYJA */
-        "magic": "fcc1b7dc"
+        "disableTransactions": true
     }
 }

coins/litecoin.json
@@ -1,5 +1,7 @@
 {
     "name": "Litecoin",
     "symbol": "LTC",
-    "algorithm": "scrypt"
+    "algorithm": "scrypt",
+    "peerMagic": "fbc0b6db",
+    "peerMagicTestnet": "fcc1b7dc"
 }
init.js (9 changed lines)
@@ -161,13 +161,6 @@ var spawnPoolWorkers = function(){

     Object.keys(poolConfigs).forEach(function(coin){
         var p = poolConfigs[coin];
-        var internalEnabled = p.shareProcessing && p.shareProcessing.internal && p.shareProcessing.internal.enabled;
-        var mposEnabled = p.shareProcessing && p.shareProcessing.mpos && p.shareProcessing.mpos.enabled;
-
-        if (!internalEnabled && !mposEnabled){
-            logger.error('Master', coin, 'Share processing is not configured so a pool cannot be started for this coin.');
-            delete poolConfigs[coin];
-        }

         if (!Array.isArray(p.daemons) || p.daemons.length < 1){
             logger.error('Master', coin, 'No daemons configured so a pool cannot be started for this coin.');
@@ -364,7 +357,7 @@ var startPaymentProcessor = function(){
     var enabledForAny = false;
     for (var pool in poolConfigs){
         var p = poolConfigs[pool];
-        var enabled = p.enabled && p.shareProcessing && p.shareProcessing.internal && p.shareProcessing.internal.enabled;
+        var enabled = p.enabled && p.paymentProcessing && p.paymentProcessing.enabled;
         if (enabled){
             enabledForAny = true;
             break;

libs/mposCompatibility.js
@@ -2,31 +2,30 @@ var mysql = require('mysql');
 var cluster = require('cluster');
 module.exports = function(logger, poolConfig){

-    var mposConfig = poolConfig.shareProcessing.mpos;
+    var mposConfig = poolConfig.mposMode;
     var coin = poolConfig.coin.name;

-    var connection;
+    var connection = mysql.createPool({
+        host: mposConfig.host,
+        port: mposConfig.port,
+        user: mposConfig.user,
+        password: mposConfig.password,
+        database: mposConfig.database
+    });

     var logIdentify = 'MySQL';
     var logComponent = coin;

-    function connect(){
-        connection = mysql.createPool({
-            host: mposConfig.host,
-            port: mposConfig.port,
-            user: mposConfig.user,
-            password: mposConfig.password,
-            database: mposConfig.database
-        });
-    }
-    connect();

     this.handleAuth = function(workerName, password, authCallback){

+        if (poolConfig.validateWorkerUsername !== true && mposConfig.autoCreateWorker !== true){
+            authCallback(true);
+            return;
+        }

         connection.query(
             'SELECT password FROM pool_worker WHERE username = LOWER(?)',
             [workerName.toLowerCase()],
@@ -65,16 +64,15 @@ module.exports = function(logger, poolConfig){
                         }
                     }
                 );
-            }else{
+            }
+            else{
                 authCallback(false);
             }
         }
-        else if (mposConfig.stratumAuth === 'worker')
-            authCallback(true);
-        else if (result[0].password === password)
-            authCallback(true)
-        else
+        else if (mposConfig.checkPassword && result[0].password !== password)
             authCallback(false);
+        else
+            authCallback(true);
     }
 );
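The rewritten handleAuth now reduces to three checks: skip validation entirely when neither validateWorkerUsername nor autoCreateWorker is set, reject unknown workers, and only compare passwords when checkPassword is enabled. A condensed, illustrative restatement (not the actual module code):

```js
// Condensed sketch of the new MPOS auth decision, mirroring the diff above.
function mposAuthDecision(config, workerRow, suppliedPassword){
    // 1) No validation requested and no auto-registration: accept any username.
    if (config.validateWorkerUsername !== true && config.autoCreateWorker !== true)
        return true;
    // 2) Worker row looked up in MySQL (pool_worker table); unknown workers are
    //    rejected here unless autoCreateWorker registers them (omitted in this sketch).
    if (!workerRow)
        return false;
    // 3) With checkPassword set, a wrong password rejects; otherwise accept.
    if (config.checkPassword && workerRow.password !== suppliedPassword)
        return false;
    return true;
}
```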
libs/paymentProcessor.js
@@ -13,9 +13,8 @@ module.exports = function(logger){

     Object.keys(poolConfigs).forEach(function(coin) {
         var poolOptions = poolConfigs[coin];
-        if (poolOptions.shareProcessing &&
-            poolOptions.shareProcessing.internal &&
-            poolOptions.shareProcessing.internal.enabled)
+        if (poolOptions.paymentProcessing &&
+            poolOptions.paymentProcessing.enabled)
             enabledPools.push(coin);
     });
@@ -27,14 +26,14 @@ module.exports = function(logger){
     coins.forEach(function(coin){

         var poolOptions = poolConfigs[coin];
-        var processingConfig = poolOptions.shareProcessing.internal;
+        var processingConfig = poolOptions.paymentProcessing;
         var logSystem = 'Payments';
         var logComponent = coin;

         logger.debug(logSystem, logComponent, 'Payment processing setup to run every '
             + processingConfig.paymentInterval + ' second(s) with daemon ('
             + processingConfig.daemon.user + '@' + processingConfig.daemon.host + ':' + processingConfig.daemon.port
-            + ') and redis (' + processingConfig.redis.host + ':' + processingConfig.redis.port + ')');
+            + ') and redis (' + poolOptions.redis.host + ':' + poolOptions.redis.port + ')');

     });
 });
@@ -45,68 +44,64 @@ function SetupForPool(logger, poolOptions, setupFinished){

     var coin = poolOptions.coin.name;
-    var processingConfig = poolOptions.shareProcessing.internal;
+    var processingConfig = poolOptions.paymentProcessing;

     var logSystem = 'Payments';
     var logComponent = coin;

-    var processingPayments = true;
-
-    var daemon;
-    var redisClient;
+    var daemon = new Stratum.daemon.interface([processingConfig.daemon]);
+    var redisClient = redis.createClient(poolOptions.redis.port, poolOptions.redis.host);
+
+    var magnitude;
+    var minPaymentSatoshis;
+    var coinPrecision;
+
+    var paymentInterval;

     async.parallel([

         function(callback){
-            daemon = new Stratum.daemon.interface([processingConfig.daemon]);
-            daemon.once('online', function(){
-                daemon.cmd('validateaddress', [poolOptions.address], function(result){
-                    if (!result[0].response || !result[0].response.ismine){
-                        logger.error(logSystem, logComponent,
-                            'Daemon does not own pool address - payment processing can not be done with this daemon, '
-                            + JSON.stringify(result[0].response));
-                        return;
-                    }
-                    callback()
-                });
-            }).once('connectionFailed', function(error){
-                logger.error(logSystem, logComponent, 'Failed to connect to daemon for payment processing: config ' +
-                    JSON.stringify(processingConfig.daemon) + ', error: ' +
-                    JSON.stringify(error));
-                callback('Error connecting to daemon');
-            }).on('error', function(error){
-                logger.error(logSystem, logComponent, 'Daemon error ' + JSON.stringify(error));
-            }).init();
+            daemon.cmd('validateaddress', [poolOptions.address], function(result) {
+                if (result.error){
+                    logger.error(logSystem, logComponent, 'Error with payment processing daemon ' + JSON.stringify(result.error));
+                    callback(true);
+                }
+                else if (!result.response || !result.response.ismine) {
+                    logger.error(logSystem, logComponent,
+                        'Daemon does not own pool address - payment processing can not be done with this daemon, '
+                        + JSON.stringify(result.response));
+                    callback(true);
+                }
+                else{
+                    callback()
+                }
+            }, true);
         },
         function(callback){
-            redisClient = redis.createClient(processingConfig.redis.port, processingConfig.redis.host);
-            redisClient.on('ready', function(){
-                if (callback) {
-                    callback();
-                    callback = null;
-                    return;
-                }
-                logger.debug(logSystem, logComponent, 'Connected to redis at '
-                    + processingConfig.redis.host + ':' + processingConfig.redis.port + ' for payment processing');
-            }).on('end', function(){
-                logger.error(logSystem, logComponent, 'Connection to redis database has been ended');
-            }).once('error', function(){
-                if (callback) {
-                    logger.error(logSystem, logComponent, 'Failed to connect to redis at '
-                        + processingConfig.redis.host + ':' + processingConfig.redis.port + ' for payment processing');
-                    callback('Error connecting to redis');
-                    callback = null;
-                }
-            });
+            daemon.cmd('getbalance', [], function(result){
+                if (result.error){
+                    callback(true);
+                    return;
+                }
+                try {
+                    var d = result.data.split('result":')[1].split(',')[0].split('.')[1];
+                    magnitude = parseInt('10' + new Array(d.length).join('0'));
+                    minPaymentSatoshis = parseInt(processingConfig.minimumPayment * magnitude);
+                    coinPrecision = magnitude.toString().length - 1;
+                    callback();
+                }
+                catch(e){
+                    logger.error(logSystem, logComponent, 'Error detecting number of satoshis in a coin, cannot do payment processing');
+                    callback(true);
+                }
+            }, true, true);
         }
     ], function(err){
         if (err){
             setupFinished(false);
             return;
         }
-        setInterval(function(){
+        paymentInterval = setInterval(function(){
             try {
                 processPayments();
             } catch(e){
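The second parallel task above derives the coin's satoshi magnitude from the raw getbalance response text (result.data) instead of the parsed float, so the daemon's printed precision is preserved exactly. A worked example of that parsing, assuming a daemon that prints eight decimal places:

```js
// Worked example of the magnitude detection above. A daemon's raw getbalance
// response body (result.data) might look like this assumed sample:
var data = '{"result":0.00000000,"error":null,"id":1}';

var d = data.split('result":')[1].split(',')[0].split('.')[1]; // "00000000" (8 digits)
var magnitude = parseInt('10' + new Array(d.length).join('0')); // "1" + 8 zeros -> 100000000
var coinPrecision = magnitude.toString().length - 1;            // 8 decimal places
var minPaymentSatoshis = parseInt(0.01 * magnitude);            // 0.01 coins -> 1000000

console.log(magnitude, coinPrecision, minPaymentSatoshis); // 100000000 8 1000000
```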
@@ -118,97 +113,74 @@ function SetupForPool(logger, poolOptions, setupFinished){
     });

-    /* Call redis to check if previous sendmany and/or redis cleanout commands completed successfully.
-       If sendmany worked fine but redis commands failed you HAVE TO run the redis commands again
-       (manually) to prevent double payments. If sendmany failed too you can safely delete the
-       coin + '_finalRedisCommands' string from redis to let the pool calculate payments again. */
-    function checkPreviousPaymentsStatus(callback) {
-        redisClient.get(coin + '_finalRedisCommands', function(error, reply) {
-            if (error){
-                callback('Could not get finalRedisCommands - ' + JSON.stringify(error));
-                return;
-            }
-            if (reply) {
-                callback('Payments stopped because of the critical error - failed commands saved in '
-                    + coin + '_finalRedisCommands redis set:\n' + reply);
-                return;
-            } else {
-                /* There was no error in previous sendmany and/or redis cleanout commands
-                   so we can safely continue */
-                processingPayments = false;
-                callback();
-            }
-        });
-    }
-
-    /* Number.toFixed gives us the decimal places we want, but as a string. parseFloat turns it back into a number;
-       we don't care about trailing zeros in this case. */
-    var toPrecision = function(value, precision){
-        return parseFloat(value.toFixed(precision));
+    var satoshisToCoins = function(satoshis){
+        return parseFloat((satoshis / magnitude).toFixed(coinPrecision));
     };

     /* Deal with numbers in smallest possible units (satoshis) as much as possible. This greatly helps with accuracy
        when rounding and whatnot. When we are storing numbers for only humans to see, store in whole coin units. */

     var processPayments = function(){

         var startPaymentProcess = Date.now();

-        async.waterfall([
-
-            function(callback) {
-                if (processingPayments) {
-                    checkPreviousPaymentsStatus(function(error){
-                        if (error) {
-                            logger.error(logSystem, logComponent, error);
-                            callback('Check finished - previous payments processing error');
-                            return;
-                        }
-                        callback();
-                    });
-                    return;
-                }
-                callback();
-            },
+        var timeSpentRPC = 0;
+        var timeSpentRedis = 0;
+
+        var startTimeRedis;
+        var startTimeRPC;
+
+        var startRedisTimer = function(){ startTimeRedis = Date.now() };
+        var endRedisTimer = function(){ timeSpentRedis += Date.now() - startTimeRedis };
+
+        var startRPCTimer = function(){ startTimeRPC = Date.now(); };
+        var endRPCTimer = function(){ timeSpentRPC += Date.now() - startTimeRedis };
+
+        async.waterfall([

             /* Call redis to get an array of rounds - which are coinbase transactions and block heights from submitted
                blocks. */
             function(callback){

-                redisClient.smembers(coin + '_blocksPending', function(error, results){
+                startRedisTimer();
+                redisClient.multi([
+                    ['hgetall', coin + '_balances'],
+                    ['smembers', coin + '_blocksPending']
+                ]).exec(function(error, results){
+                    endRedisTimer();

                     if (error){
                         logger.error(logSystem, logComponent, 'Could not get blocks from redis ' + JSON.stringify(error));
-                        callback('Check finished - redis error for getting blocks');
-                        return;
-                    }
-                    if (results.length === 0){
-                        callback('Check finished - no pending blocks in redis');
+                        callback(true);
                         return;
                     }

-                    var rounds = results.map(function(r){
+                    var workers = {};
+                    for (var w in results[0]){
+                        workers[w] = {balance: parseInt(results[0][w])};
+                    }
+
+                    var rounds = results[1].map(function(r){
                         var details = r.split(':');
                         return {
-                            category: details[0].category,
                             blockHash: details[0],
                             txHash: details[1],
                             height: details[2],
-                            reward: details[3],
                             serialized: r
                         };
                     });

-                    callback(null, rounds);
+                    callback(null, workers, rounds);
                 });
             },

             /* Does a batch rpc call to the daemon with all the transaction hashes to see if they are confirmed yet.
                It also adds the block reward amount to the round object - which the daemon also gives us. */
-            function(rounds, callback){
+            function(workers, rounds, callback){

                 var batchRPCcommand = rounds.map(function(r){
                     return ['gettransaction', [r.txHash]];
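The first waterfall step now reads two structures in a single redis round trip: carried-over worker balances and the set of pending blocks. A sketch of the assumed key layout and the round parsing (the key names come from the diff; the sample values are made up):

```js
// Assumed redis layout read by the multi() call above:
//   <coin>_balances       hash: worker address -> carried-over balance in satoshis
//   <coin>_blocksPending  set:  one "blockHash:txHash:height" member per submitted block

// Parsing one (made-up) pending-block member the same way the new code does:
var details = '00000000abcd1234:9f2e8d7c6b5a4321:314159'.split(':');
var round = {
    blockHash: details[0],
    txHash: details[1],
    height: details[2], // note: still a string at this point
    serialized: details.join(':')
};
console.log(round.txHash); // '9f2e8d7c6b5a4321'
```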
@@ -216,11 +188,14 @@ function SetupForPool(logger, poolOptions, setupFinished){

                 batchRPCcommand.push(['getaccount', [poolOptions.address]]);

+                startRPCTimer();
                 daemon.batchCmd(batchRPCcommand, function(error, txDetails){
+                    endRPCTimer();

                     if (error || !txDetails){
-                        callback('Check finished - daemon rpc error with batch gettransactions ' +
-                            JSON.stringify(error));
+                        logger.error(logSystem, logComponent, 'Check finished - daemon rpc error with batch gettransactions '
+                            + JSON.stringify(error));
+                        callback(true);
                         return;
                     }
@@ -235,70 +210,51 @@ function SetupForPool(logger, poolOptions, setupFinished){

                     var round = rounds[i];

-                    if (tx.error && tx.error.code === -5 || round.blockHash !== tx.result.blockhash){
-
-                        /* Block was dropped from coin daemon even after it happily accepted it earlier. */
-
-                        //If we find another block at the same height then this block was drop-kicked orphaned
-                        var dropKicked = rounds.filter(function(r){
-                            return r.height === round.height && r.blockHash !== round.blockHash && r.category !== 'dropkicked';
-                        }).length > 0;
-
-                        if (dropKicked){
-                            logger.warning(logSystem, logComponent,
-                                'A block was drop-kicked orphaned'
-                                + ' - we found a better block at the same height, blockHash '
-                                + round.blockHash + " round " + round.height);
-                            round.category = 'dropkicked';
-                        }
-                        else{
-                            /* We have no other blocks that match this height so convert to orphan in order for
-                               shares from the round to be rewarded. */
-                            round.category = 'orphan';
-                        }
+                    if (tx.error && tx.error.code === -5){
+                        logger.error(logSystem, logComponent, 'Daemon reports invalid transaction ' + round.txHash + ' '
+                            + JSON.stringify(tx.error));
+                        return;
                     }
                     else if (tx.error || !tx.result){
-                        logger.error(logSystem, logComponent,
-                            'Error with requesting transaction from block daemon: ' + JSON.stringify(tx));
+                        logger.error(logSystem, logComponent, 'Odd error with gettransaction ' + round.txHash + ' '
+                            + JSON.stringify(tx));
+                        return;
                     }
-                    else{
-                        round.category = tx.result.details[0].category;
-                        if (round.category === 'generate')
-                            round.amount = tx.result.amount;
+                    else if (round.blockHash !== tx.result.blockhash){
+                        logger.error(logSystem, logComponent, 'Daemon reports blockhash ' + tx.result.blockhash
+                            + ' for tx ' + round.txHash + ' is not the one we have stored: ' + round.blockHash);
+                        return;
                     }
+                    else if (!(tx.result.details instanceof Array)){
+                        logger.error(logSystem, logComponent, 'Details array missing from transaction '
+                            + round.txHash);
+                        return;
+                    }
+
+                    var generationTx = tx.result.details.filter(function(tx){
+                        return tx.address === poolOptions.address;
+                    })[0];
+
+                    if (!generationTx){
+                        logger.error(logSystem, logComponent, 'Missing output details to pool address for transaction '
+                            + round.txHash);
+                        return;
+                    }
+
+                    round.category = generationTx.category;
+                    if (round.category === 'generate') {
+                        round.reward = generationTx.amount;
+                    }

                 });

-                var magnitude;
-
                 //Filter out all rounds that are immature (not confirmed or orphaned yet)
                 rounds = rounds.filter(function(r){
                     switch (r.category) {

                         case 'generate':
-                            /* Here we calculate the smallest unit in this coin's currency; the 'satoshi'.
-                               The rpc.getblocktemplate.amount tells us how much we get in satoshis, while the
-                               rpc.gettransaction.amount tells us how much we get in whole coin units. Therefore,
-                               we simply divide the two to get the magnitude. I don't know math, there is probably
-                               a better term than 'magnitude'. Sue me or do a pull request to fix it. */
-                            var roundMagnitude = Math.round(r.reward / r.amount);
-
-                            if (!magnitude) {
-                                magnitude = roundMagnitude;
-
-                                if (roundMagnitude % 10 !== 0)
-                                    logger.error(logSystem, logComponent,
-                                        'Satoshis in coin is not divisible by 10 which is very odd');
-                            }
-                            else if (magnitude != roundMagnitude) {
-                                /* Magnitude for a coin should ALWAYS be the same. For BTC and most coins there are
-                                   100,000,000 satoshis in one coin unit. */
-                                logger.error(logSystem, logComponent,
-                                    'Magnitude in a round was different than in another round. HUGE PROBLEM.');
-                            }
                             return true;

-                        case 'dropkicked':
                         case 'orphan':
                             return true;
                         default:
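Round classification above no longer trusts details[0]: the gettransaction result's details array is searched for the output paying the pool address, and that entry's category decides the round ('generate' for a mature reward, 'immature' or 'orphan' otherwise). A sketch with an assumed response shape:

```js
// Assumed (simplified) shape of a gettransaction result for a submitted coinbase:
var poolAddress = 'mi4iBXbBsydtcc5yFmsff2zCFVX4XG7qJc';
var tx = {
    result: {
        blockhash: '00000000abcd1234',
        details: [
            { address: poolAddress, category: 'generate', amount: 50 }
        ]
    }
};

var generationTx = tx.result.details.filter(function(d){
    return d.address === poolAddress;
})[0];

console.log(generationTx && generationTx.category); // 'generate' -> round pays out
```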
@@ -307,35 +263,30 @@ function SetupForPool(logger, poolOptions, setupFinished){
                 });

-                if (rounds.length === 0){
-                    callback('Check finished - no confirmed or orphaned blocks found');
-                }
-                else{
-                    callback(null, rounds, magnitude, addressAccount);
-                }
+                callback(null, workers, rounds, addressAccount);
             });
         },

         /* Does a batch redis call to get shares contributed to each round. Then calculates the reward
            amount owed to each miner for each round. */
-        function(rounds, magnitude, addressAccount, callback){
+        function(workers, rounds, addressAccount, callback){

             var shareLookups = rounds.map(function(r){
                 return ['hgetall', coin + '_shares:round' + r.height]
             });

+            startRedisTimer();
             redisClient.multi(shareLookups).exec(function(error, allWorkerShares){
+                endRedisTimer();

                 if (error){
-                    callback('Check finished - redis error with multi get rounds share')
+                    callback('Check finished - redis error with multi get rounds share');
                     return;
                 }

-                var orphanMergeCommands = [];
-                var workerRewards = {};
-
                 rounds.forEach(function(round, i){
                     var workerShares = allWorkerShares[i];
@@ -352,282 +303,191 @@ function SetupForPool(logger, poolOptions, setupFinished){
                            miners still get a reward for their work. This seems unfair to those that just
                            started mining during this current round, but over time it balances out and rewards
                            loyal miners. */
-                        Object.keys(workerShares).forEach(function(worker){
-                            orphanMergeCommands.push(['hincrby', coin + '_shares:roundCurrent',
-                                worker, workerShares[worker]]);
-                        });
+                        round.workerShares = workerShares;
                         break;

                     case 'generate':
                         /* We found a confirmed block! Now get the reward for it and calculate how much
                            we owe each miner based on the shares they submitted during that block round. */
-                        var reward = round.reward * (1 - processingConfig.feePercent);
+                        var reward = parseInt(round.reward * magnitude);

                         var totalShares = Object.keys(workerShares).reduce(function(p, c){
                             return p + parseFloat(workerShares[c])
                         }, 0);

-                        for (var worker in workerShares){
-                            var percent = parseFloat(workerShares[worker]) / totalShares;
+                        for (var workerAddress in workerShares){
+                            var percent = parseFloat(workerShares[workerAddress]) / totalShares;
                             var workerRewardTotal = Math.floor(reward * percent);
-                            if (!(worker in workerRewards)) workerRewards[worker] = 0;
-                            workerRewards[worker] += workerRewardTotal;
+                            var worker = workers[workerAddress] = (workers[workerAddress] || {});
+                            worker.reward = (worker.reward || 0) + workerRewardTotal;
                         }
                         break;
                 }

             });

-                callback(null, rounds, magnitude, workerRewards, orphanMergeCommands, addressAccount);
+                callback(null, workers, rounds, addressAccount);
             });
         },

-        /* Does a batch call to redis to get workers' existing balances from coin_balances */
-        function(rounds, magnitude, workerRewards, orphanMergeCommands, addressAccount, callback){
-
-            var workers = Object.keys(workerRewards);
-
-            redisClient.hmget([coin + '_balances'].concat(workers), function(error, results){
-                if (error && workers.length !== 0){
-                    callback('Check finished - redis error with multi get balances ' + JSON.stringify(error));
-                    return;
-                }
-
-                var workerBalances = {};
-
-                for (var i = 0; i < workers.length; i++){
-                    workerBalances[workers[i]] = (parseInt(results[i]) || 0);
-                }
-
-                callback(null, rounds, magnitude, workerRewards, orphanMergeCommands, workerBalances, addressAccount);
-            });
-
-        },

         /* Calculate if any payments are ready to be sent and trigger them sending.
            Get the balance difference for each address and pass it along as an object of latest balances such as
            {worker1: balance1, worker2: balance2}
            When deciding the sent balance, the difference should be -1 * (amount they had in the db);
            if not sending the balance, the difference should be +(the amount they earned this round).
         */
-        function(rounds, magnitude, workerRewards, orphanMergeCommands, workerBalances, addressAccount, callback){
+        function(workers, rounds, addressAccount, callback) {

-            //number of satoshis in a single coin unit - this can be different for coins so we calculate it :)
-            daemon.cmd('getbalance', [addressAccount || ''], function(results){
-
-                var totalBalance = results[0].response * magnitude;
-                var toBePaid = 0;
-                var workerPayments = {};
-
-                var balanceUpdateCommands = [];
-                var workerPayoutsCommand = [];
-
-                /* Here we add up all workers' previous unpaid balances plus their current rewards as we are
-                   about to check if they reach the payout threshold. */
-                for (var worker in workerRewards){
-                    workerPayments[worker] = ((workerPayments[worker] || 0) + workerRewards[worker]);
-                }
-                for (var worker in workerBalances){
-                    workerPayments[worker] = ((workerPayments[worker] || 0) + workerBalances[worker]);
-                }
-
-                /* Here we check if any of the workers reached their payout threshold, or delete them from the
-                   pending payment ledger (the workerPayments object). */
-                if (Object.keys(workerPayments).length > 0){
-                    var coinPrecision = magnitude.toString().length - 1;
-                    for (var worker in workerPayments){
-                        if (workerPayments[worker] < processingConfig.minimumPayment * magnitude){
-                            /* The worker's total earnings (balance + current reward) were not enough to warrant
-                               a transaction, so we will store their balance in the database. Next time they
-                               are rewarded it might reach the payout threshold. */
-                            balanceUpdateCommands.push([
-                                'hincrby',
-                                coin + '_balances',
-                                worker,
-                                workerRewards[worker]
-                            ]);
-                            delete workerPayments[worker];
-                        }
-                        else{
-                            //If worker had a balance that is about to be paid out, subtract it from the database
-                            if (workerBalances[worker] !== 0){
-                                balanceUpdateCommands.push([
-                                    'hincrby',
-                                    coin + '_balances',
-                                    worker,
-                                    -1 * workerBalances[worker]
-                                ]);
-                            }
-                            var rewardInPrecision = (workerRewards[worker] / magnitude).toFixed(coinPrecision);
-                            workerPayoutsCommand.push(['hincrbyfloat', coin + '_payouts', worker, rewardInPrecision]);
-                            toBePaid += workerPayments[worker];
-                        }
-                    }
-                }
-
-                // txfee included in feeAmountToBeCollected
-                var leftOver = toBePaid / (1 - processingConfig.feePercent);
-                var feeAmountToBeCollected = toPrecision(leftOver * processingConfig.feePercent, coinPrecision);
-                var balanceLeftOver = totalBalance - toBePaid - feeAmountToBeCollected;
-                var minReserveSatoshis = processingConfig.minimumReserve * magnitude;
-                if (balanceLeftOver < minReserveSatoshis){
-                    /* TODO: Need to convert all these variables into whole coin units before displaying because
-                       humans aren't good at reading satoshi units. */
-                    callback('Check finished - payments would wipe out minimum reserve, tried to pay out ' +
-                        (toBePaid/magnitude) + ' and collect ' + (feeAmountToBeCollected/magnitude) + ' as fees' +
-                        ' but only have ' + (totalBalance/magnitude) + '. Left over balance would be ' + (balanceLeftOver/magnitude) +
-                        ', needs to be at least ' + (minReserveSatoshis/magnitude));
-                    return;
-                }
-
-                /* Move pending blocks into either orphaned or confirmed sets, and delete their no longer
-                   required round/shares data. */
-                var movePendingCommands = [];
-                var roundsToDelete = [];
-                rounds.forEach(function(r){
-
-                    var destinationSet = (function(){
-                        switch(r.category){
-                            case 'orphan': return '_blocksOrphaned';
-                            case 'generate': return '_blocksConfirmed';
-                            case 'dropkicked': return '_blocksDropKicked';
-                        }
-                    })();
-                    movePendingCommands.push(['smove', coin + '_blocksPending', coin + destinationSet, r.serialized]);
-                    if (r.category === 'generate')
-                        roundsToDelete.push(coin + '_shares:round' + r.height)
-                });
-
-                var finalRedisCommands = [];
-
-                if (movePendingCommands.length > 0)
-                    finalRedisCommands = finalRedisCommands.concat(movePendingCommands);
-
-                if (orphanMergeCommands.length > 0)
-                    finalRedisCommands = finalRedisCommands.concat(orphanMergeCommands);
-
-                if (balanceUpdateCommands.length > 0)
-                    finalRedisCommands = finalRedisCommands.concat(balanceUpdateCommands);
-
-                if (workerPayoutsCommand.length > 0)
-                    finalRedisCommands = finalRedisCommands.concat(workerPayoutsCommand);
-
-                if (roundsToDelete.length > 0)
-                    finalRedisCommands.push(['del'].concat(roundsToDelete));
-
-                if (toBePaid !== 0)
-                    finalRedisCommands.push(['hincrbyfloat', coin + '_stats', 'totalPaid', (toBePaid / magnitude).toFixed(coinPrecision)]);
-
-                finalRedisCommands.push(['del', coin + '_finalRedisCommands']);
-
-                finalRedisCommands.push(['bgsave']);
-
-                callback(null, magnitude, workerPayments, finalRedisCommands, addressAccount);
-
-            });
-        },
-
-        function(magnitude, workerPayments, finalRedisCommands, addressAccount, callback) {
-            /* Save final redis cleanout commands in case something goes wrong during payments */
-            redisClient.set(coin + '_finalRedisCommands', JSON.stringify(finalRedisCommands), function(error, reply) {
-                if (error){
-                    callback('Check finished - error with saving finalRedisCommands ' + JSON.stringify(error));
-                    return;
-                }
-                callback(null, magnitude, workerPayments, finalRedisCommands, addressAccount);
-            });
-        },
-
-        function(magnitude, workerPayments, finalRedisCommands, addressAccount, callback){
-
-            //This does the final all-or-nothing atomic transaction if the block daemon sent payments
-            var finalizeRedisTx = function(){
-                redisClient.multi(finalRedisCommands).exec(function(error, results){
-                    if (error){
-                        callback('Error with final redis commands for cleaning up ' + JSON.stringify(error));
-                        return;
-                    }
-                    processingPayments = false;
-                    logger.debug(logSystem, logComponent, 'Payments processing performed an interval');
-                });
-            };
-
-            if (Object.keys(workerPayments).length === 0){
-                finalizeRedisTx();
-            }
-            else{
-
-                //This is how many decimal places to round a coin down to
-                var coinPrecision = magnitude.toString().length - 1;
-                var addressAmounts = {};
-                var totalAmountUnits = 0;
-                for (var address in workerPayments){
-                    var coinUnits = toPrecision(workerPayments[address] / magnitude, coinPrecision);
-                    var properAddress = getProperAddress(address);
-                    if (!properAddress){
-                        logger.error(logSystem, logComponent, 'Could not convert pubkey ' + address + ' into address');
-                        continue;
-                    }
-                    addressAmounts[properAddress] = coinUnits;
-                    totalAmountUnits += coinUnits;
-                }
-
-                logger.debug(logSystem, logComponent, 'Payments to be sent to: ' + JSON.stringify(addressAmounts));
-
-                processingPayments = true;
-                daemon.cmd('sendmany', [addressAccount || '', addressAmounts], function(results){
-
-                    if (results[0].error){
-                        callback('Check finished - error with sendmany ' + JSON.stringify(results[0].error));
-                        return;
-                    }
-
-                    finalizeRedisTx();
-
-                    var totalWorkers = Object.keys(workerPayments).length;
-                    logger.debug(logSystem, logComponent, 'Payments sent, a total of ' + totalAmountUnits
-                        + ' ' + poolOptions.coin.symbol + ' was sent to ' + totalWorkers + ' miners');
-
-                    daemon.cmd('gettransaction', [results[0].response], function(results){
-                        if (results[0].error){
-                            callback('Check finished - error with gettransaction ' + JSON.stringify(results[0].error));
-                            return;
-                        }
-                        var feeAmountUnits = parseFloat((totalAmountUnits / (1 - processingConfig.feePercent) * processingConfig.feePercent).toFixed(coinPrecision));
-                        var poolFees = feeAmountUnits - results[0].response.fee;
-                        daemon.cmd('move', [addressAccount || '', processingConfig.feeCollectAccount, poolFees], function(results){
-                            if (results[0].error){
-                                callback('Check finished - error with move ' + JSON.stringify(results[0].error));
-                                return;
-                            }
-                            callback(null, poolFees + ' ' + poolOptions.coin.symbol + ' collected as pool fee');
-                        });
-                    });
-                });
-            }
-        }
-    ], function(error, result){
+            var trySend = function (withholdPercent) {
+                var addressAmounts = {};
+                var totalSent = 0;
+                for (var w in workers) {
+                    var worker = workers[w];
+                    worker.balance = worker.balance || 0;
+                    worker.reward = worker.reward || 0;
+                    var toSend = (worker.balance + worker.reward) * (1 - withholdPercent);
+                    if (toSend >= minPaymentSatoshis) {
+                        totalSent += toSend;
+                        var address = worker.address = (worker.address || getProperAddress(w));
+                        worker.sent = addressAmounts[address] = satoshisToCoins(toSend);
+                        worker.balanceChange = Math.min(worker.balance, toSend) * -1;
+                    }
+                    else {
+                        worker.balanceChange = Math.max(toSend - worker.balance, 0);
+                        worker.sent = 0;
+                    }
+                }
+
+                if (Object.keys(addressAmounts).length === 0){
+                    callback(null, workers, rounds);
+                    return;
+                }
+
+                daemon.cmd('sendmany', [addressAccount || '', addressAmounts], function (result) {
+                    if (result.error && result.error.code === -6) {
+                        var higherPercent = withholdPercent + 0.01;
+                        console.log('asdfasdfsadfasdf');
+                        logger.warning(logSystem, logComponent, 'Not enough funds to send out payments, decreasing rewards by '
+                            + (higherPercent * 100) + '% and retrying');
+                        trySend(higherPercent);
+                    }
+                    else if (result.error) {
+                        logger.error(logSystem, logComponent, 'Error trying to send payments with RPC sendmany '
+                            + JSON.stringify(result.error));
+                        callback(true);
+                    }
+                    else {
+                        logger.debug(logSystem, logComponent, 'Sent out a total of ' + (totalSent / magnitude)
+                            + ' to ' + Object.keys(addressAmounts).length + ' workers');
+                        if (withholdPercent > 0) {
+                            logger.warning(logSystem, logComponent, 'Had to withhold ' + (withholdPercent * 100)
+                                + '% of reward from miners to cover transaction fees. '
+                                + 'Fund pool wallet with coins to prevent this from happening');
+                        }
+                        callback(null, workers, rounds);
+                    }
+                }, true, true);
+            };
+            trySend(0);
+        },
+
+        function(workers, rounds, callback){
+
+            var totalPaid = 0;
+
+            var balanceUpdateCommands = [];
+            var workerPayoutsCommand = [];
+
+            for (var w in workers) {
+                var worker = workers[w];
+                if (worker.balanceChange !== 0){
+                    balanceUpdateCommands.push([
+                        'hincrby',
+                        coin + '_balances',
+                        w,
+                        worker.balanceChange
+                    ]);
+                }
+                if (worker.sent !== 0){
+                    workerPayoutsCommand.push(['hincrbyfloat', coin + '_payouts', w, worker.sent]);
+                    totalPaid += worker.sent;
+                }
+            }
+
+            var movePendingCommands = [];
+            var roundsToDelete = [];
+            var orphanMergeCommands = [];
+
+            rounds.forEach(function(r){
+
+                switch(r.category){
+                    case 'orphan':
+                        movePendingCommands.push(['smove', coin + '_blocksPending', coin + '_blocksOrphaned', r.serialized]);
+                        var workerShares = r.workerShares;
+                        Object.keys(workerShares).forEach(function(worker){
+                            orphanMergeCommands.push(['hincrby', coin + '_shares:roundCurrent',
+                                worker, workerShares[worker]]);
+                        });
+                        break;
+                    case 'generate':
+                        movePendingCommands.push(['smove', coin + '_blocksPending', coin + '_blocksConfirmed', r.serialized]);
+                        roundsToDelete.push(coin + '_shares:round' + r.height);
+                        break;
+                }
+
+            });
+
+            var finalRedisCommands = [];
+
+            if (movePendingCommands.length > 0)
+                finalRedisCommands = finalRedisCommands.concat(movePendingCommands);
+
+            if (orphanMergeCommands.length > 0)
+                finalRedisCommands = finalRedisCommands.concat(orphanMergeCommands);
+
+            if (balanceUpdateCommands.length > 0)
+                finalRedisCommands = finalRedisCommands.concat(balanceUpdateCommands);
+
+            if (workerPayoutsCommand.length > 0)
+                finalRedisCommands = finalRedisCommands.concat(workerPayoutsCommand);
+
+            if (roundsToDelete.length > 0)
+                finalRedisCommands.push(['del'].concat(roundsToDelete));
+
+            if (totalPaid !== 0)
+                finalRedisCommands.push(['hincrbyfloat', coin + '_stats', 'totalPaid', totalPaid]);
+
+            if (finalRedisCommands.length === 0){
+                callback();
+                return;
+            }
+
+            startRedisTimer();
+            redisClient.multi(finalRedisCommands).exec(function(error, results){
+                endRedisTimer();
+                if (error){
+                    clearInterval(paymentInterval);
+                    logger.error(logSystem, logComponent,
+                        'Payments sent but could not update redis. ' + JSON.stringify(error)
+                        + ' Disabling payment processing to prevent possible double-payouts. The redis commands in '
+                        + coin + '_finalRedisCommands.txt must be run manually');
+                    fs.writeFile(coin + '_finalRedisCommands.txt', JSON.stringify(finalRedisCommands), function(err){
+                        logger.error('Could not write finalRedisCommands.txt, you are fucked.');
+                    });
+                }
+                callback();
+            });
+        }
+
+    ], function(){

         var paymentProcessTime = Date.now() - startPaymentProcess;
+        logger.debug(logSystem, logComponent, 'Finished interval - time spent: '
+            + paymentProcessTime + 'ms total, ' + timeSpentRedis + 'ms redis, '
+            + timeSpentRPC + 'ms daemon RPC');
-
-        if (error)
-            logger.debug(logSystem, logComponent, '[Took ' + paymentProcessTime + 'ms] ' + error);
-        else{
-            logger.debug(logSystem, logComponent, '[' + paymentProcessTime + 'ms] ' + result);
-            // not sure if we need some time to let the daemon update the wallet balance
-            setTimeout(withdrawalProfit, 1000);
-        }
     });
 };
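trySend above is the core of the new payout step: it computes each worker's payable amount and, when the wallet cannot cover the sendmany (RPC error -6, insufficient funds), retries with one more percent withheld. A worked example of the arithmetic (values assumed; rounding added only to keep the printout tidy):

```js
// Worked example of the withholding retry in trySend (all values in satoshis).
var minPaymentSatoshis = 1000000; // 0.01 coins at a magnitude of 100000000
var worker = { balance: 400000, reward: 900000 };

[0, 0.01, 0.02].forEach(function(withholdPercent){
    var toSend = Math.round((worker.balance + worker.reward) * (1 - withholdPercent));
    console.log((withholdPercent * 100) + '% withheld -> ' + toSend +
        (toSend >= minPaymentSatoshis ? ' (paid out)' : ' (kept as balance)'));
});
// 0% withheld -> 1300000 (paid out)
// 1% withheld -> 1287000 (paid out)
// 2% withheld -> 1274000 (paid out)
```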
@ -639,37 +499,5 @@ function SetupForPool(logger, poolOptions, setupFinished){
|
||||||
             else return address;
         };

-        var withdrawalProfit = function(){
-
-            if (!processingConfig.feeWithdrawalThreshold) return;
-
-            logger.debug(logSystem, logComponent, 'Profit withdrawal started');
-            daemon.cmd('getbalance', [processingConfig.feeCollectAccount], function(results){
-
-                // We have to pay some tx fee here too, but maybe we shouldn't care about it too much as long as the fee is less
-                // than the minimumReserve value. In that case, even if the feeCollectAccount ends up with a negative balance,
-                // the total wallet balance stays positive and feeCollectAccount is refilled during the next payment processing.
-                var withdrawalAmount = results[0].response;
-
-                if (withdrawalAmount < processingConfig.feeWithdrawalThreshold){
-                    logger.debug(logSystem, logComponent, 'Not enough profit to withdraw yet');
-                }
-                else{
-                    var withdrawal = {};
-                    withdrawal[processingConfig.feeReceiveAddress] = withdrawalAmount;
-                    daemon.cmd('sendmany', [processingConfig.feeCollectAccount, withdrawal], function(results){
-                        if (results[0].error){
-                            logger.debug(logSystem, logComponent, 'Profit withdrawal of ' + withdrawalAmount + ' failed - error with sendmany '
-                                + JSON.stringify(results[0].error));
-                            return;
-                        }
-                        logger.debug(logSystem, logComponent, 'Profit sent, a total of ' + withdrawalAmount
-                            + ' ' + poolOptions.coin.symbol + ' was sent to ' + processingConfig.feeReceiveAddress);
-                    });
-                }
-            });
-        };
 }
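The fee flow that replaces this removed code takes its cut per block instead of skimming a wallet account, so there is no balance to watch and no second `sendmany` pass. A rough sketch of that per-block deduction (function and variable names here are illustrative, not lifted from the new payments code):

````javascript
// Deduct each recipient's configured percentage from a block reward;
// whatever remains is what gets split among miners by shares.
function deductRecipientFees(blockReward, rewardRecipients){
    var minerReward = blockReward;
    var recipientPayouts = {};
    Object.keys(rewardRecipients).forEach(function(address){
        var cut = blockReward * (rewardRecipients[address] / 100);
        recipientPayouts[address] = cut;
        minerReward -= cut;
    });
    return { minerReward: minerReward, recipientPayouts: recipientPayouts };
}
````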

@@ -108,10 +108,8 @@ module.exports = function(logger){
             diff: function(){}
         };

-        var shareProcessing = poolOptions.shareProcessing;

         //Functions required for MPOS compatibility
-        if (shareProcessing && shareProcessing.mpos && shareProcessing.mpos.enabled){
+        if (poolOptions.mposMode && poolOptions.mposMode.enabled){
             var mposCompat = new MposCompatibility(logger, poolOptions);

             handlers.auth = function(port, workerName, password, authCallback){

@@ -128,12 +126,12 @@ module.exports = function(logger){
         }

         //Functions required for internal payment processing
-        else if (shareProcessing && shareProcessing.internal && shareProcessing.internal.enabled){
+        else{

             var shareProcessor = new ShareProcessor(logger, poolOptions);

             handlers.auth = function(port, workerName, password, authCallback){
-                if (shareProcessing.internal.validateWorkerAddress !== true)
+                if (poolOptions.validateWorkerUsername !== true)
                     authCallback(true);
                 else {
                     port = port.toString();
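With `validateWorkerUsername` on, the internal auth path can reject workers whose username is not a payable address before any shares are accepted. A sketch of what such a check might look like against the coin daemon (the handler wiring is illustrative; `daemon.cmd` follows the same calling convention used elsewhere in this commit):

````javascript
// Authorize a stratum worker only if its username validates as an
// address on the coin daemon.
function authorizeWorker(daemon, workerName, authCallback){
    daemon.cmd('validateaddress', [workerName], function(results){
        var valid = results[0].response && results[0].response.isvalid === true;
        authCallback(valid);
    });
}
````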
@@ -238,10 +236,7 @@ module.exports = function(logger){
         });*/

         redisClient.hgetall("proxyState", function(error, obj) {
-            if (error || obj == null) {
-                //logger.debug(logSystem, logComponent, logSubCat, 'No last proxy state found in redis');
-            }
-            else {
+            if (!error && obj) {
                 proxyState = obj;
                 logger.debug(logSystem, logComponent, logSubCat, 'Last proxy state loaded from redis');
             }
@@ -258,64 +253,49 @@ module.exports = function(logger){

             var algorithm = portalConfig.switching[switchName].algorithm;

-            if (portalConfig.switching[switchName].enabled === true) {
-                var initalPool = proxyState.hasOwnProperty(algorithm) ? proxyState[algorithm] : _this.getFirstPoolForAlgorithm(algorithm);
-                proxySwitch[switchName] = {
-                    algorithm: algorithm,
-                    ports: portalConfig.switching[switchName].ports,
-                    currentPool: initalPool,
-                    servers: []
-                };
-
-                // Copy diff and vardiff configuration into pools that match our algorithm so the stratum server can pick them up
-                //
-                // Note: This seems a bit wonky and brittle - better if proxy just used the diff config of the port it was
-                // routed into instead.
-                //
-                /*if (portalConfig.proxy[algorithm].hasOwnProperty('varDiff')) {
-                    proxySwitch[algorithm].varDiff = new Stratum.varDiff(proxySwitch[algorithm].port, portalConfig.proxy[algorithm].varDiff);
-                    proxySwitch[algorithm].diff = portalConfig.proxy[algorithm].diff;
-                }*/
+            if (!portalConfig.switching[switchName].enabled) return;
+
+            var initalPool = proxyState.hasOwnProperty(algorithm) ? proxyState[algorithm] : _this.getFirstPoolForAlgorithm(algorithm);
+            proxySwitch[switchName] = {
+                algorithm: algorithm,
+                ports: portalConfig.switching[switchName].ports,
+                currentPool: initalPool,
+                servers: []
+            };

             Object.keys(pools).forEach(function (coinName) {
                 var p = pools[coinName];
                 if (poolConfigs[coinName].coin.algorithm === algorithm) {
                     for (var port in portalConfig.switching[switchName].ports) {
                         if (portalConfig.switching[switchName].ports[port].varDiff)
                             p.setVarDiff(port, portalConfig.switching[switchName].ports[port].varDiff);
                     }
                 }
             });

             Object.keys(proxySwitch[switchName].ports).forEach(function(port){
                 var f = net.createServer(function(socket) {
                     var currentPool = proxySwitch[switchName].currentPool;

                     logger.debug(logSystem, 'Connect', logSubCat, 'Connection to '
                         + switchName + ' from '
                         + socket.remoteAddress + ' on '
                         + port + ' routing to ' + currentPool);

                     pools[currentPool].getStratumServer().handleNewClient(socket);

                 }).listen(parseInt(port), function() {
                     logger.debug(logSystem, logComponent, logSubCat, 'Switching "' + switchName
                         + '" listening for ' + algorithm
                         + ' on port ' + port
                         + ' into ' + proxySwitch[switchName].currentPool);
                 });
                 proxySwitch[switchName].servers.push(f);
             });

-            }
-            else {
-                //logger.debug(logSystem, logComponent, logSubCat, 'Proxy pool for ' + algorithm + ' disabled.');
-            }
         });
     });
 }
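Since the current pool per algorithm is read back from the `proxyState` hash at startup, the switch only has to write that hash whenever it changes a routing decision. A sketch of what the write side might look like (assuming the same node_redis client; the helper name is made up):

````javascript
// Persist which pool an algorithm's switch is currently routed to,
// so the proxy resumes there after a restart.
function saveProxyState(redisClient, algorithm, poolName){
    redisClient.hset('proxyState', algorithm, poolName, function(error){
        if (error)
            console.error('Failed to persist proxy state: ' + error);
    });
}
````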
@@ -399,7 +399,7 @@ module.exports = function(logger){
             Object.keys(profitStatus[algo]).forEach(function(symbol){
                 var coinName = profitStatus[algo][symbol].name;
                 var poolConfig = poolConfigs[coinName];
-                var daemonConfig = poolConfig.shareProcessing.internal.daemon;
+                var daemonConfig = poolConfig.paymentProcessing.daemon;
                 daemonTasks.push(function(callback){
                     _this.getDaemonInfoForCoin(symbol, daemonConfig, callback)
                 });
@@ -16,8 +16,7 @@ value: a hash with..

 module.exports = function(logger, poolConfig){

-    var internalConfig = poolConfig.shareProcessing.internal;
-    var redisConfig = internalConfig.redis;
+    var redisConfig = poolConfig.redis;
     var coin = poolConfig.coin.name;

     var forkId = process.env.forkId;
@@ -60,7 +59,7 @@ module.exports = function(logger, poolConfig){

         if (isValidBlock){
             redisCommands.push(['rename', coin + '_shares:roundCurrent', coin + '_shares:round' + shareData.height]);
-            redisCommands.push(['sadd', coin + '_blocksPending', [shareData.blockHash, shareData.txHash, shareData.height, shareData.reward].join(':')]);
+            redisCommands.push(['sadd', coin + '_blocksPending', [shareData.blockHash, shareData.txHash, shareData.height].join(':')]);
             redisCommands.push(['hincrby', coin + '_stats', 'validBlocks', 1]);
         }
         else if (shareData.blockHash){
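Pending blocks are now serialized as `blockHash:txHash:height` — the cached reward is dropped, presumably because the refactored payment processor can recover the real reward from the block's coinbase transaction via the daemon. Decoding an entry is just a split (a sketch, not the portal's code):

````javascript
// Split a member of <coin>_blocksPending back into its fields.
function parsePendingBlock(serialized){
    var parts = serialized.split(':');
    return {
        blockHash: parts[0],
        txHash: parts[1],
        height: parseInt(parts[2], 10)
    };
}
````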
@@ -70,8 +69,6 @@ module.exports = function(logger, poolConfig){
         connection.multi(redisCommands).exec(function(err, replies){
             if (err)
                 logger.error(logSystem, logComponent, logSubCat, 'Error with share processor multi ' + JSON.stringify(err));
-            //else
-                //logger.debug(logSystem, logComponent, logSubCat, 'Share data and stats recorded');
         });
@@ -35,14 +35,7 @@ module.exports = function(logger, portalConfig, poolConfigs){

         var poolConfig = poolConfigs[coin];

-        if (!poolConfig.shareProcessing || !poolConfig.shareProcessing.internal){
-            logger.error(logSystem, coin, 'Cannot do stats without internal share processing setup');
-            canDoStats = false;
-            return;
-        }
-
-        var internalConfig = poolConfig.shareProcessing.internal;
-        var redisConfig = internalConfig.redis;
+        var redisConfig = poolConfig.redis;

         for (var i = 0; i < redisClients.length; i++){
             var client = redisClients[i];
@@ -115,7 +108,7 @@ module.exports = function(logger, portalConfig, poolConfigs){
             var redisCommands = [];

-            var redisComamndTemplates = [
+            var redisCommandTemplates = [
                 ['zremrangebyscore', '_hashrate', '-inf', '(' + windowTime],
                 ['zrangebyscore', '_hashrate', windowTime, '+inf'],
                 ['hgetall', '_stats'],
@@ -124,10 +117,10 @@ module.exports = function(logger, portalConfig, poolConfigs){
                 ['scard', '_blocksOrphaned']
             ];

-            var commandsPerCoin = redisComamndTemplates.length;
+            var commandsPerCoin = redisCommandTemplates.length;

             client.coins.map(function(coin){
-                redisComamndTemplates.map(function(t){
+                redisCommandTemplates.map(function(t){
                     var clonedTemplates = t.slice(0);
                     clonedTemplates[1] = coin + clonedTemplates[1];
                     redisCommands.push(clonedTemplates);
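The loop above clones each template and prefixes its key (element 1) with the coin name, so one flat `multi` batch serves every coin on a redis instance. For example, with two coins (coin names here are illustrative):

````javascript
var redisCommandTemplates = [
    ['hgetall', '_stats'],
    ['scard', '_blocksPending']
];

var redisCommands = [];
['Litecoin', 'Dogecoin'].forEach(function(coin){
    redisCommandTemplates.forEach(function(t){
        var cloned = t.slice(0);      // copy so the shared template is untouched
        cloned[1] = coin + cloned[1]; // e.g. 'Litecoin_stats'
        redisCommands.push(cloned);
    });
});
// redisCommands[0] -> ['hgetall', 'Litecoin_stats']
````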
@@ -151,7 +151,7 @@ module.exports = function(logger){
         for (var pName in poolConfigs){
             if (pName.toLowerCase() === c)
                 return {
-                    daemon: poolConfigs[pName].shareProcessing.internal.daemon,
+                    daemon: poolConfigs[pName].paymentProcessing.daemon,
                     address: poolConfigs[pName].address
                 }
         }
@@ -3,6 +3,13 @@
     "coin": "litecoin.json",

     "address": "n4jSe18kZMCdGcZqaYprShXW6EH1wivUK1",

+    "rewardRecipients": {
+        "n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5,
+        "mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5,
+        "22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
+    },
+
     "blockRefreshInterval": 1000,
     "txRefreshInterval": 20000,
     "jobRebroadcastTimeout": 55,
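The three recipients above take 1.5% + 0.5% + 0.1% = 2.1% of every block, leaving 97.9% for miners; on a 50 LTC block that is 1.05 LTC in fees and 48.95 LTC to be shared. A quick check of the total for a config like this (illustrative, not part of the portal):

````javascript
// Total fee percentage taken by rewardRecipients in this config.
var rewardRecipients = {
    "n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5,
    "mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5,
    "22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
};
var totalPercent = Object.keys(rewardRecipients).reduce(function(sum, key){
    return sum + rewardRecipients[key];
}, 0);
console.log(totalPercent + '% of each block goes to recipients'); // 2.1%
````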
@@ -12,50 +19,47 @@

     "tcpProxyProtocol": false,

-    "shareProcessing": {
-        "internal": {
-            "enabled": true,
-            "validateWorkerAddress": true,
-            "paymentInterval": 20,
-            "minimumPayment": 70,
-            "minimumReserve": 10,
-            "feePercent": 0.05,
-            "feeCollectAccount": "feesCollected",
-            "feeReceiveAddress": "mppaGeNaSbG1Q7S6V3gL5uJztMhucgL9Vh",
-            "feeWithdrawalThreshold": 5,
-            "daemon": {
-                "host": "127.0.0.1",
-                "port": 19332,
-                "user": "litecoinrpc",
-                "password": "testnet"
-            },
-            "redis": {
-                "host": "127.0.0.1",
-                "port": 6379
-            }
-        },
-        "mpos": {
-            "enabled": false,
-            "host": "127.0.0.1",
-            "port": 3306,
-            "user": "me",
-            "password": "mypass",
-            "database": "ltc",
-            "stratumAuth": "password"
-        }
-    },
+    "validateWorkerUsername": true,
+
+    "paymentProcessing": {
+        "enabled": true,
+        "paymentInterval": 20,
+        "minimumPayment": 70,
+        "daemon": {
+            "host": "127.0.0.1",
+            "port": 19332,
+            "user": "litecoinrpc",
+            "password": "testnet"
+        }
+    },
+
+    "redis": {
+        "host": "127.0.0.1",
+        "port": 6379
+    },
+
+    "mposMode": {
+        "enabled": false,
+        "host": "127.0.0.1",
+        "port": 3306,
+        "user": "me",
+        "password": "mypass",
+        "database": "ltc",
+        "checkPassword": true,
+        "autoCreateWorker": false
+    },

     "banning": {
         "enabled": true,
-        "time": 600,
+        "time": 300,
         "invalidPercent": 50,
-        "checkThreshold": 500,
+        "checkThreshold": 10,
         "purgeInterval": 300
     },

     "ports": {
         "3008": {
-            "diff": 8
+            "diff": 4
         },
         "3032": {
             "diff": 32,
@@ -78,20 +82,14 @@
             "port": 19332,
             "user": "litecoinrpc",
             "password": "testnet"
-        },
-        {
-            "host": "127.0.0.1",
-            "port": 19344,
-            "user": "litecoinrpc",
-            "password": "testnet"
         }
     ],

     "p2p": {
-        "enabled": false,
+        "enabled": true,
         "host": "127.0.0.1",
         "port": 19333,
-        "disableTransactions": true,
-        "magic": "fcc1b7dc"
+        "disableTransactions": true
     }
 }
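With `magic` gone from the pool's `p2p` block, the network magic now lives in the coin definition (the `peerMagic`/`peerMagicTestnet` fields), so multiple pools on the same coin no longer repeat it. A sketch of how a pool might resolve the bytes at startup (the helper and the lookup wiring are illustrative):

````javascript
// Pick the right network magic for p2p block notifications from the
// coin definition rather than from each pool's own p2p section.
function resolvePeerMagic(coin, useTestnet){
    var hex = useTestnet ? coin.peerMagicTestnet : coin.peerMagic;
    if (!hex)
        throw new Error('No peer magic configured for ' + coin.name);
    return new Buffer(hex, 'hex'); // e.g. 'fbc0b6db' -> <fb c0 b6 db>
}

var litecoin = { name: 'Litecoin', peerMagic: 'fbc0b6db', peerMagicTestnet: 'fcc1b7dc' };
console.log(resolvePeerMagic(litecoin, true));
````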