Added rewardRecipients configuration, refactored payment processing, moved p2p magic to coin config

This commit is contained in:
Matt 2014-05-02 15:59:46 -06:00
parent 80be932651
commit 73668709ce
11 changed files with 450 additions and 675 deletions

166
README.md
View File

@ -284,13 +284,22 @@ Here is an example of the required fields:
{
"name": "Litecoin",
"symbol": "ltc",
"algorithm": "scrypt", //or "sha256", "scrypt-jane", "scrypt-n", "quark", "x11"
"txMessages": false, //or true (not required, defaults to false)
"mposDiffMultiplier": 256, //only for x11 coins in mpos mode, set to 256 (optional)
"algorithm": "scrypt",
/* Magic value only required for setting up p2p block notifications. It is found in the daemon
source code as the pchMessageStart variable.
For example, litecoin mainnet magic: http://git.io/Bi8YFw
And for litecoin testnet magic: http://git.io/NXBYJA */
"peerMagic": "fbc0b6db" //optional
"peerMagicTestnet": "fcc1b7dc" //optional
//"txMessages": false, //options - defaults to false
//"mposDiffMultiplier": 256, //options - only for x11 coins in mpos mode
}
````
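As a rough illustration of where that value comes from, the "peerMagic" string is simply the four pchMessageStart bytes from the daemon source written out in order as hex (Litecoin mainnet bytes shown here):
````
// Illustration only: the four pchMessageStart bytes from the Litecoin daemon
// source, joined in order, give the hex string used for "peerMagic".
var pchMessageStart = [0xfb, 0xc0, 0xb6, 0xdb];
var peerMagic = new Buffer(pchMessageStart).toString('hex');
console.log(peerMagic); // "fbc0b6db"
````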
For additional documentation how to configure coins *(especially important for scrypt-n and scrypt-jane coins)*
For additional documentation on how to configure coins and their different algorithms
see [these instructions](//github.com/zone117x/node-stratum-pool#module-usage).
@ -307,6 +316,17 @@ Description of options:
"address": "mi4iBXbBsydtcc5yFmsff2zCFVX4XG7qJc", //Address to where block rewards are given
/* Block rewards go to the configured pool wallet address to later be paid out to miners,
except for percentages that can go to pool operator(s) as pool fees or donations.
Addresses or hashed public keys can be used; a short worked split follows this example config. */
"rewardRecipients": {
"n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5, //1.5% goes to pool op
"mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5, //0.5% goes to a pool co-owner
//0.1% donation to NOMP to help support development
"22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
},
"blockRefreshInterval": 1000, //How often to poll RPC daemons for new blocks, in milliseconds
/* How many milliseconds should have passed before new block transactions will trigger a new
@ -332,96 +352,68 @@ Description of options:
miners/pools that deal with scrypt use a guesstimated one that is about 5.86% off from the
actual one. So here we can set a tolerable threshold for if a share is slightly too low
due to mining apps using incorrect max diffs and this pool using correct max diffs. */
"shareVariancePercent": 10,
"shareVariancePercent": 2,
/* Enable for client IP addresses to be detected when using a load balancer with TCP proxy
protocol enabled, such as HAProxy with 'send-proxy' param:
http://haproxy.1wt.eu/download/1.5/doc/configuration.txt */
"tcpProxyProtocol": false,
/* To receive payments, miners must connect with their address or mining key as their username.
This option will only authenticate miners using an address or mining key. */
"validateWorkerAddress": true,
/* This determines what to do with submitted shares (and stratum worker authentication).
You have two options:
1) Enable internal and disable mpos = this portal to handle all share payments.
2) Enable mpos and disable internal = shares will be inserted into MySQL database
for MPOS to process. */
"shareProcessing": {
"paymentProcessing": {
"enabled": true,
"internal": {
"enabled": true,
/* Every this many seconds get submitted blocks from redis, use daemon RPC to check
their confirmation status, if confirmed then get shares from redis that contributed
to block and send out payments. */
"paymentInterval": 30,
/* When workers connect, to receive payments, their address must be used as the worker
name. If this option is true, on worker authentication, their address will be
verified via a validateaddress API call to the daemon. Miners with invalid addresses
will be rejected. */
"validateWorkerAddress": true,
/* Minimum number of coins that a miner must earn before sending payment. Typically,
a higher minimum means less transactions fees (you profit more) but miners see
payments less frequently (they dislike). Opposite for a lower minimum payment. */
"minimumPayment": 0.01,
/* Every this many seconds get submitted blocks from redis, use daemon RPC to check
their confirmation status, if confirmed then get shares from redis that contributed
to block and send out payments. */
"paymentInterval": 30,
/* Minimum number of coins that a miner must earn before sending payment. Typically,
a higher minimum means less transactions fees (you profit more) but miners see
payments less frequently (they dislike). Opposite for a lower minimum payment. */
"minimumPayment": 0.001,
/* Minimum number of coins to keep in pool wallet. It is recommended to deposit at
at least this many coins into the pool wallet when first starting the pool. */
"minimumReserve": 10,
/* (2% default) What percent fee your pool takes from the block reward. */
"feePercent": 0.02,
/* Name of the daemon account to use when moving coin profit within daemon wallet. */
"feeCollectAccount": "feesCollected",
/* Your address that receives pool revenue from fees. */
"feeReceiveAddress": "LZz44iyF4zLCXJTU8RxztyyJZBntdS6fvv",
/* How many coins from fee revenue must accumulate on top of the
minimum reserve amount in order to trigger withdrawal to fee address. The higher
this threshold, the less of your profit goes to transactions fees. */
"feeWithdrawalThreshold": 5,
/* This daemon is used to send out payments. It MUST be for the daemon that owns the
configured 'address' that receives the block rewards, otherwise the daemon will not
be able to confirm blocks or send out payments. */
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},
/* Redis database used for storing share and block submission data. */
"redis": {
"host": "127.0.0.1",
"port": 6379
}
},
/* Enabled mpos and shares will be inserted into share table in a MySQL database. You may
also want to use the "emitInvalidBlockHashes" option below if you require it. */
"mpos": {
"enabled": false,
"host": "127.0.0.1", //MySQL db host
"port": 3306, //MySQL db port
"user": "me", //MySQL db user
"password": "mypass", //MySQL db password
"database": "ltc", //MySQL db database name
/* Unregistered workers can automatically be registered (added to database) on stratum
worker authentication if this is true. */
"autoCreateWorker": false,
/* For when miner's authenticate: set to "password" for both worker name and password to
be checked for in the database, set to "worker" for only work name to be checked, or
don't use this option (set to "none") for no auth checks */
"stratumAuth": "password"
/* This daemon is used to send out payments. It MUST be for the daemon that owns the
configured 'address' that receives the block rewards, otherwise the daemon will not
be able to confirm blocks or send out payments. */
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
}
},
/* Redis database used for storing share and block submission data and payment processing. */
"redis": {
"host": "127.0.0.1",
"port": 6379
}
/* Enable this mode and shares will be inserted into a MySQL database. You may also want
to use the "emitInvalidBlockHashes" option below if you require it. The config options
"redis" and "paymentProcessing" will be ignored/unused if this is enabled. */
"mposMode": {
"enabled": false,
"host": "127.0.0.1", //MySQL db host
"port": 3306, //MySQL db port
"user": "me", //MySQL db user
"password": "mypass", //MySQL db password
"database": "ltc", //MySQL db database name
/* Checks for valid password in database when miners connect. */
"checkPassword": true,
/* Unregistered workers can automatically be registered (added to database) on stratum
worker authentication if this is true. */
"autoCreateWorker": false
},
/* If a worker is submitting a high threshold of invalid shares we can temporarily ban their IP
to reduce system/network load. Also useful to fight against flooding attacks. If running
behind something like HAProxy be sure to enable 'tcpProxyProtocol', otherwise you'll end up
@ -476,8 +468,8 @@ Description of options:
/* This allows the pool to connect to the daemon as a node peer to receive block updates.
It may be the most efficient way to get block updates (faster than polling, less
intensive than blocknotify script). It requires additional setup: the 'magic' field must
be exact (extracted from the coin source code). */
intensive than blocknotify script). It requires the additional field "peerMagic" in
the coin config. */
"p2p": {
"enabled": false,
@ -490,13 +482,7 @@ Description of options:
/* If your coin daemon is new enough (i.e. not a shitcoin) then it will support a p2p
feature that prevents the daemon from spamming our peer node with unnecessary
transaction data. Assume it's supported, but if you have problems, try disabling it. */
"disableTransactions": true,
/* Magic value is different for main/testnet and for each coin. It is found in the daemon
source code as the pchMessageStart variable.
For example, litecoin mainnet magic: http://git.io/Bi8YFw
And for litecoin testnet magic: http://git.io/NXBYJA */
"magic": "fcc1b7dc"
"disableTransactions": true
}
}
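As a quick sense-check of the rewardRecipients percentages described above, here is a rough worked split for a hypothetical 50-coin block reward (the real split is applied to the block reward when a block is found):
````
// Hypothetical 50-coin block reward split by the example recipients above.
var blockReward = 50;
var rewardRecipients = {
    "n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5,
    "mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5,
    "22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
};
var totalPercent = Object.keys(rewardRecipients).reduce(function(total, r){
    return total + rewardRecipients[r];
}, 0);                                                 // 2.1
var toRecipients = blockReward * (totalPercent / 100); // 1.05 coins to recipients
var toPoolAddress = blockReward - toRecipients;        // 48.95 coins, later paid out to miners
````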

View File

@ -1,5 +1,7 @@
{
"name": "Litecoin",
"symbol": "LTC",
"algorithm": "scrypt"
"algorithm": "scrypt",
"peerMagic": "fbc0b6db",
"peerMagicTestnet": "fcc1b7dc"
}

View File

@ -161,13 +161,6 @@ var spawnPoolWorkers = function(){
Object.keys(poolConfigs).forEach(function(coin){
var p = poolConfigs[coin];
var internalEnabled = p.shareProcessing && p.shareProcessing.internal && p.shareProcessing.internal.enabled;
var mposEnabled = p.shareProcessing && p.shareProcessing.mpos && p.shareProcessing.mpos.enabled;
if (!internalEnabled && !mposEnabled){
logger.error('Master', coin, 'Share processing is not configured so a pool cannot be started for this coin.');
delete poolConfigs[coin];
}
if (!Array.isArray(p.daemons) || p.daemons.length < 1){
logger.error('Master', coin, 'No daemons configured so a pool cannot be started for this coin.');
@ -364,7 +357,7 @@ var startPaymentProcessor = function(){
var enabledForAny = false;
for (var pool in poolConfigs){
var p = poolConfigs[pool];
var enabled = p.enabled && p.shareProcessing && p.shareProcessing.internal && p.shareProcessing.internal.enabled;
var enabled = p.enabled && p.paymentProcessing && p.paymentProcessing.enabled;
if (enabled){
enabledForAny = true;
break;

View File

@ -2,31 +2,30 @@ var mysql = require('mysql');
var cluster = require('cluster');
module.exports = function(logger, poolConfig){
var mposConfig = poolConfig.shareProcessing.mpos;
var mposConfig = poolConfig.mposMode;
var coin = poolConfig.coin.name;
var connection;
var connection = mysql.createPool({
host: mposConfig.host,
port: mposConfig.port,
user: mposConfig.user,
password: mposConfig.password,
database: mposConfig.database
});
var logIdentify = 'MySQL';
var logComponent = coin;
function connect(){
connection = mysql.createPool({
host: mposConfig.host,
port: mposConfig.port,
user: mposConfig.user,
password: mposConfig.password,
database: mposConfig.database
});
}
connect();
this.handleAuth = function(workerName, password, authCallback){
if (poolConfig.validateWorkerUsername !== true && mposConfig.autoCreateWorker !== true){
authCallback(true);
return;
}
connection.query(
'SELECT password FROM pool_worker WHERE username = LOWER(?)',
[workerName.toLowerCase()],
@ -65,16 +64,15 @@ module.exports = function(logger, poolConfig){
}
}
);
}else{
}
else{
authCallback(false);
}
}
else if (mposConfig.stratumAuth === 'worker')
authCallback(true);
else if (result[0].password === password)
authCallback(true)
else
else if (mposConfig.checkPassword && result[0].password !== password)
authCallback(false);
else
authCallback(true);
}
);
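Pieced together, the reworked MPOS auth above boils down to: skip MySQL entirely unless validateWorkerUsername or autoCreateWorker is set, and once a worker row is found, only reject when checkPassword is on and the stored password differs. A simplified sketch of that decision (the helper name and row shape are illustrative, not part of the module):
````
// Illustrative condensation of the known-worker branches above.
function mposAuthDecision(poolConfig, mposConfig, workerRow, password){
    // No username validation and no auto-create: accept without touching MySQL.
    if (poolConfig.validateWorkerUsername !== true && mposConfig.autoCreateWorker !== true)
        return true;
    // Worker row found in pool_worker: the password only matters when checkPassword is set.
    if (mposConfig.checkPassword && workerRow.password !== password)
        return false;
    return true;
}
````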

View File

@ -13,9 +13,8 @@ module.exports = function(logger){
Object.keys(poolConfigs).forEach(function(coin) {
var poolOptions = poolConfigs[coin];
if (poolOptions.shareProcessing &&
poolOptions.shareProcessing.internal &&
poolOptions.shareProcessing.internal.enabled)
if (poolOptions.paymentProcessing &&
poolOptions.paymentProcessing.enabled)
enabledPools.push(coin);
});
@ -27,14 +26,14 @@ module.exports = function(logger){
coins.forEach(function(coin){
var poolOptions = poolConfigs[coin];
var processingConfig = poolOptions.shareProcessing.internal;
var processingConfig = poolOptions.paymentProcessing;
var logSystem = 'Payments';
var logComponent = coin;
logger.debug(logSystem, logComponent, 'Payment processing setup to run every '
+ processingConfig.paymentInterval + ' second(s) with daemon ('
+ processingConfig.daemon.user + '@' + processingConfig.daemon.host + ':' + processingConfig.daemon.port
+ ') and redis (' + processingConfig.redis.host + ':' + processingConfig.redis.port + ')');
+ ') and redis (' + poolOptions.redis.host + ':' + poolOptions.redis.port + ')');
});
});
@ -45,68 +44,64 @@ function SetupForPool(logger, poolOptions, setupFinished){
var coin = poolOptions.coin.name;
var processingConfig = poolOptions.shareProcessing.internal;
var processingConfig = poolOptions.paymentProcessing;
var logSystem = 'Payments';
var logComponent = coin;
var processingPayments = true;
var daemon = new Stratum.daemon.interface([processingConfig.daemon]);
var redisClient = redis.createClient(poolOptions.redis.port, poolOptions.redis.host);
var daemon;
var redisClient;
var magnitude;
var minPaymentSatoshis;
var coinPrecision;
var paymentInterval;
async.parallel([
function(callback){
daemon = new Stratum.daemon.interface([processingConfig.daemon]);
daemon.once('online', function(){
daemon.cmd('validateaddress', [poolOptions.address], function(result){
if (!result[0].response || !result[0].response.ismine){
logger.error(logSystem, logComponent,
daemon.cmd('validateaddress', [poolOptions.address], function(result) {
if (result.error){
logger.error(logSystem, logComponent, 'Error with payment processing daemon ' + JSON.stringify(result.error));
callback(true);
}
else if (!result.response || !result.response.ismine) {
logger.error(logSystem, logComponent,
'Daemon does not own pool address - payment processing can not be done with this daemon, '
+ JSON.stringify(result[0].response));
return;
}
+ JSON.stringify(result.response));
callback(true);
}
else{
callback()
});
}).once('connectionFailed', function(error){
logger.error(logSystem, logComponent, 'Failed to connect to daemon for payment processing: config ' +
JSON.stringify(processingConfig.daemon) + ', error: ' +
JSON.stringify(error));
callback('Error connecting to daemon');
}).on('error', function(error){
logger.error(logSystem, logComponent, 'Daemon error ' + JSON.stringify(error));
}).init();
}
}, true);
},
function(callback){
redisClient = redis.createClient(processingConfig.redis.port, processingConfig.redis.host);
redisClient.on('ready', function(){
if (callback) {
callback();
callback = null;
daemon.cmd('getbalance', [], function(result){
if (result.error){
callback(true);
return;
}
logger.debug(logSystem, logComponent, 'Connected to redis at '
+ processingConfig.redis.host + ':' + processingConfig.redis.port + ' for payment processing');
}).on('end', function(){
logger.error(logSystem, logComponent, 'Connection to redis database as been ended');
}).once('error', function(){
if (callback) {
logger.error(logSystem, logComponent, 'Failed to connect to redis at '
+ processingConfig.redis.host + ':' + processingConfig.redis.port + ' for payment processing');
callback('Error connecting to redis');
callback = null;
try {
var d = result.data.split('result":')[1].split(',')[0].split('.')[1];
magnitude = parseInt('10' + new Array(d.length).join('0'));
minPaymentSatoshis = parseInt(processingConfig.minimumPayment * magnitude);
coinPrecision = magnitude.toString().length - 1;
callback();
}
catch(e){
logger.error(logSystem, logComponent, 'Error detecting number of satoshis in a coin, cannot do payment processing');
callback(true);
}
});
}, true, true);
}
], function(err){
if (err){
setupFinished(false);
return;
}
setInterval(function(){
paymentInterval = setInterval(function(){
try {
processPayments();
} catch(e){
@ -118,97 +113,74 @@ function SetupForPool(logger, poolOptions, setupFinished){
});
/* Call redis to check if previous sendmany and/or redis cleanout commands completed successfully.
If sendmany worked fine but redis commands failed you HAVE TO run redis commands again
(manually) to prevent double payments. If sendmany failed too you can safely delete
coin + '_finalRedisCommands' string from redis to let pool calculate payments again. */
function checkPreviousPaymentsStatus(callback) {
redisClient.get(coin + '_finalRedisCommands', function(error, reply) {
if (error){
callback('Could not get finalRedisCommands - ' + JSON.stringify(error));
return;
}
if (reply) {
callback('Payments stopped because of the critical error - failed commands saved in '
+ coin + '_finalRedisCommands redis set:\n' + reply);
return;
} else {
/* There was no error in previous sendmany and/or redis cleanout commands
so we can safely continue */
processingPayments = false;
callback();
}
});
}
/* Number.toFixed gives us the decimal places we want, but as a string. parseFloat turns it back into number
we don't care about trailing zeros in this case. */
var toPrecision = function(value, precision){
return parseFloat(value.toFixed(precision));
var satoshisToCoins = function(satoshis){
return parseFloat((satoshis / magnitude).toFixed(coinPrecision));
};
/* Deal with numbers in smallest possible units (satoshis) as much as possible. This greatly helps with accuracy
when rounding and whatnot. When we are storing numbers for only humans to see, store in whole coin units. */
var processPayments = function(){
var startPaymentProcess = Date.now();
async.waterfall([
var timeSpentRPC = 0;
var timeSpentRedis = 0;
function(callback) {
if (processingPayments) {
checkPreviousPaymentsStatus(function(error){
if (error) {
logger.error(logSystem, logComponent, error);
callback('Check finished - previous payments processing error');
return;
}
callback();
});
return;
}
callback();
},
var startTimeRedis;
var startTimeRPC;
var startRedisTimer = function(){ startTimeRedis = Date.now() };
var endRedisTimer = function(){ timeSpentRedis += Date.now() - startTimeRedis };
var startRPCTimer = function(){ startTimeRPC = Date.now(); };
var endRPCTimer = function(){ timeSpentRPC += Date.now() - startTimeRPC };
async.waterfall([
/* Call redis to get an array of rounds - which are coinbase transactions and block heights from submitted
blocks. */
function(callback){
redisClient.smembers(coin + '_blocksPending', function(error, results){
startRedisTimer();
redisClient.multi([
['hgetall', coin + '_balances'],
['smembers', coin + '_blocksPending']
]).exec(function(error, results){
endRedisTimer();
if (error){
logger.error(logSystem, logComponent, 'Could not get blocks from redis ' + JSON.stringify(error));
callback('Check finished - redis error for getting blocks');
return;
}
if (results.length === 0){
callback('Check finished - no pending blocks in redis');
callback(true);
return;
}
var rounds = results.map(function(r){
var workers = {};
for (var w in results[0]){
workers[w] = {balance: parseInt(results[0][w])};
}
var rounds = results[1].map(function(r){
var details = r.split(':');
return {
category: details[0].category,
blockHash: details[0],
txHash: details[1],
height: details[2],
reward: details[3],
serialized: r
};
});
callback(null, rounds);
callback(null, workers, rounds);
});
},
/* Does a batch rpc call to daemon with all the transaction hashes to see if they are confirmed yet.
It also adds the block reward amount to the round object, which the daemon also gives us. */
function(rounds, callback){
function(workers, rounds, callback){
var batchRPCcommand = rounds.map(function(r){
return ['gettransaction', [r.txHash]];
@ -216,11 +188,14 @@ function SetupForPool(logger, poolOptions, setupFinished){
batchRPCcommand.push(['getaccount', [poolOptions.address]]);
startRPCTimer();
daemon.batchCmd(batchRPCcommand, function(error, txDetails){
endRPCTimer();
if (error || !txDetails){
callback('Check finished - daemon rpc error with batch gettransactions ' +
JSON.stringify(error));
logger.error(logSystem, logComponent, 'Check finished - daemon rpc error with batch gettransactions '
+ JSON.stringify(error));
callback(true);
return;
}
@ -235,70 +210,51 @@ function SetupForPool(logger, poolOptions, setupFinished){
var round = rounds[i];
if (tx.error && tx.error.code === -5 || round.blockHash !== tx.result.blockhash){
/* Block was dropped from coin daemon even after it happily accepted it earlier. */
//If we find another block at the same height then this block was drop-kicked orphaned
var dropKicked = rounds.filter(function(r){
return r.height === round.height && r.blockHash !== round.blockHash && r.category !== 'dropkicked';
}).length > 0;
if (dropKicked){
logger.warning(logSystem, logComponent,
'A block was drop-kicked orphaned'
+ ' - we found a better block at the same height, blockHash '
+ round.blockHash + " round " + round.height);
round.category = 'dropkicked';
}
else{
/* We have no other blocks that match this height so convert to orphan in order for
shares from the round to be rewarded. */
round.category = 'orphan';
}
if (tx.error && tx.error.code === -5){
logger.error(logSystem, logComponent, 'Daemon reports invalid transaction ' + round.txHash + ' '
+ JSON.stringify(tx.error));
return;
}
else if (tx.error || !tx.result){
logger.error(logSystem, logComponent,
'Error with requesting transaction from block daemon: ' + JSON.stringify(tx));
logger.error(logSystem, logComponent, 'Odd error with gettransaction ' + round.txHash + ' '
+ JSON.stringify(tx));
return;
}
else{
round.category = tx.result.details[0].category;
if (round.category === 'generate')
round.amount = tx.result.amount;
else if (round.blockHash !== tx.result.blockhash){
logger.error(logSystem, logComponent, 'Daemon reports blockhash ' + tx.result.blockhash
+ ' for tx ' + round.txHash + ' is not the one we have stored: ' + round.blockHash);
return;
}
else if (!(tx.result.details instanceof Array)){
logger.error(logSystem, logComponent, 'Details array missing from transaction '
+ round.txHash);
return;
}
var generationTx = tx.result.details.filter(function(tx){
return tx.address === poolOptions.address;
})[0];
if (!generationTx){
logger.error(logSystem, logComponent, 'Missing output details to pool address for transaction '
+ round.txHash);
return;
}
round.category = generationTx.category;
if (round.category === 'generate') {
round.reward = generationTx.amount;
}
});
var magnitude;
//Filter out all rounds that are immature (not confirmed or orphaned yet)
rounds = rounds.filter(function(r){
switch (r.category) {
case 'generate':
/* Here we calculate the smallest unit in this coin's currency; the 'satoshi'.
The rpc.getblocktemplate.amount tells us how much we get in satoshis, while the
rpc.gettransaction.amount tells us how much we get in whole coin units. Therefore,
we simply divide the two to get the magnitude. I don't know math, there is probably
a better term than 'magnitude'. Sue me or do a pull request to fix it. */
var roundMagnitude = Math.round(r.reward / r.amount);
if (!magnitude) {
magnitude = roundMagnitude;
if (roundMagnitude % 10 !== 0)
logger.error(logSystem, logComponent,
'Satosihis in coin is not divisible by 10 which is very odd');
}
else if (magnitude != roundMagnitude) {
/* Magnitude for a coin should ALWAYS be the same. For BTC and most coins there are
100,000,000 satoshis in one coin unit. */
logger.error(logSystem, logComponent,
'Magnitude in a round was different than in another round. HUGE PROBLEM.');
}
return true;
case 'dropkicked':
case 'orphan':
return true;
default:
@ -307,35 +263,30 @@ function SetupForPool(logger, poolOptions, setupFinished){
});
if (rounds.length === 0){
callback('Check finished - no confirmed or orphaned blocks found');
}
else{
callback(null, rounds, magnitude, addressAccount);
}
callback(null, workers, rounds, addressAccount);
});
},
/* Does a batch redis call to get shares contributed to each round. Then calculates the reward
amount owned to each miner for each round. */
function(rounds, magnitude, addressAccount, callback){
function(workers, rounds, addressAccount, callback){
var shareLookups = rounds.map(function(r){
return ['hgetall', coin + '_shares:round' + r.height]
});
startRedisTimer();
redisClient.multi(shareLookups).exec(function(error, allWorkerShares){
endRedisTimer();
if (error){
callback('Check finished - redis error with multi get rounds share')
callback('Check finished - redis error with multi get rounds share');
return;
}
var orphanMergeCommands = [];
var workerRewards = {};
rounds.forEach(function(round, i){
var workerShares = allWorkerShares[i];
@ -352,282 +303,191 @@ function SetupForPool(logger, poolOptions, setupFinished){
miners still get a reward for their work. This seems unfair to those that just
started mining during this current round, but over time it balances out and rewards
loyal miners. */
Object.keys(workerShares).forEach(function(worker){
orphanMergeCommands.push(['hincrby', coin + '_shares:roundCurrent',
worker, workerShares[worker]]);
});
round.workerShares = workerShares;
break;
case 'generate':
/* We found a confirmed block! Now get the reward for it and calculate how much
we owe each miner based on the shares they submitted during that block round. */
var reward = round.reward * (1 - processingConfig.feePercent);
var reward = parseInt(round.reward * magnitude);
var totalShares = Object.keys(workerShares).reduce(function(p, c){
return p + parseFloat(workerShares[c])
}, 0);
for (var worker in workerShares){
var percent = parseFloat(workerShares[worker]) / totalShares;
for (var workerAddress in workerShares){
var percent = parseFloat(workerShares[workerAddress]) / totalShares;
var workerRewardTotal = Math.floor(reward * percent);
if (!(worker in workerRewards)) workerRewards[worker] = 0;
workerRewards[worker] += workerRewardTotal;
var worker = workers[workerAddress] = (workers[workerAddress] || {});
worker.reward = (worker.reward || 0) + workerRewardTotal;
}
break;
}
});
callback(null, rounds, magnitude, workerRewards, orphanMergeCommands, addressAccount);
callback(null, workers, rounds, addressAccount);
});
},
/* Does a batch call to redis to get worker existing balances from coin_balances*/
function(rounds, magnitude, workerRewards, orphanMergeCommands, addressAccount, callback){
var workers = Object.keys(workerRewards);
redisClient.hmget([coin + '_balances'].concat(workers), function(error, results){
if (error && workers.length !== 0){
callback('Check finished - redis error with multi get balances ' + JSON.stringify(error));
return;
}
var workerBalances = {};
for (var i = 0; i < workers.length; i++){
workerBalances[workers[i]] = (parseInt(results[i]) || 0);
}
callback(null, rounds, magnitude, workerRewards, orphanMergeCommands, workerBalances, addressAccount);
});
},
/* Calculate if any payments are ready to be sent and trigger them sending
Get balance different for each address and pass it along as object of latest balances such as
{worker1: balance1, worker2, balance2}
when deciding the sent balance, it the difference should be -1*amount they had in db,
if not sending the balance, the differnce should be +(the amount they earned this round)
*/
function(rounds, magnitude, workerRewards, orphanMergeCommands, workerBalances, addressAccount, callback){
function(workers, rounds, addressAccount, callback) {
//number of satoshis in a single coin unit - this can be different for coins so we calculate it :)
daemon.cmd('getbalance', [addressAccount || ''], function(results){
var totalBalance = results[0].response * magnitude;
var toBePaid = 0;
var workerPayments = {};
var balanceUpdateCommands = [];
var workerPayoutsCommand = [];
/* Here we add up all workers' previous unpaid balances plus their current rewards as we are
about to check if they reach the payout threshold. */
for (var worker in workerRewards){
workerPayments[worker] = ((workerPayments[worker] || 0) + workerRewards[worker]);
}
for (var worker in workerBalances){
workerPayments[worker] = ((workerPayments[worker] || 0) + workerBalances[worker]);
}
/* Here we check if any of the workers reached their payout threshold, or delete them from the
pending payment ledger (the workerPayments object). */
if (Object.keys(workerPayments).length > 0){
var coinPrecision = magnitude.toString().length - 1;
for (var worker in workerPayments){
if (workerPayments[worker] < processingConfig.minimumPayment * magnitude){
/* The workers total earnings (balance + current reward) was not enough to warrant
a transaction, so we will store their balance in the database. Next time they
are rewarded it might reach the payout threshold. */
balanceUpdateCommands.push([
'hincrby',
coin + '_balances',
worker,
workerRewards[worker]
]);
delete workerPayments[worker];
}
else{
//If worker had a balance that is about to be paid out, subtract it from the database
if (workerBalances[worker] !== 0){
balanceUpdateCommands.push([
'hincrby',
coin + '_balances',
worker,
-1 * workerBalances[worker]
]);
}
var rewardInPrecision = (workerRewards[worker] / magnitude).toFixed(coinPrecision);
workerPayoutsCommand.push(['hincrbyfloat', coin + '_payouts', worker, rewardInPrecision]);
toBePaid += workerPayments[worker];
}
}
}
// txfee included in feeAmountToBeCollected
var leftOver = toBePaid / (1 - processingConfig.feePercent);
var feeAmountToBeCollected = toPrecision(leftOver * processingConfig.feePercent, coinPrecision);
var balanceLeftOver = totalBalance - toBePaid - feeAmountToBeCollected;
var minReserveSatoshis = processingConfig.minimumReserve * magnitude;
if (balanceLeftOver < minReserveSatoshis){
/* TODO: Need to convert all these variables into whole coin units before displaying because
humans aren't good at reading satoshi units. */
callback('Check finished - payments would wipe out minimum reserve, tried to pay out ' +
(toBePaid/magnitude) + ' and collect ' + (feeAmountToBeCollected/magnitude) + ' as fees' +
' but only have ' + (totalBalance/magnitude) + '. Left over balance would be ' + (balanceLeftOver/magnitude) +
', needs to be at least ' + (minReserveSatoshis/magnitude));
return;
}
/* Move pending blocks into either orphan for confirmed sets, and delete their no longer
required round/shares data. */
var movePendingCommands = [];
var roundsToDelete = [];
rounds.forEach(function(r){
var destinationSet = (function(){
switch(r.category){
case 'orphan': return '_blocksOrphaned';
case 'generate': return '_blocksConfirmed';
case 'dropkicked': return '_blocksDropKicked';
}
})();
movePendingCommands.push(['smove', coin + '_blocksPending', coin + destinationSet, r.serialized]);
if (r.category === 'generate')
roundsToDelete.push(coin + '_shares:round' + r.height)
});
var finalRedisCommands = [];
if (movePendingCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(movePendingCommands);
if (orphanMergeCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(orphanMergeCommands);
if (balanceUpdateCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(balanceUpdateCommands);
if (workerPayoutsCommand.length > 0)
finalRedisCommands = finalRedisCommands.concat(workerPayoutsCommand);
if (roundsToDelete.length > 0)
finalRedisCommands.push(['del'].concat(roundsToDelete));
if (toBePaid !== 0)
finalRedisCommands.push(['hincrbyfloat', coin + '_stats', 'totalPaid', (toBePaid / magnitude).toFixed(coinPrecision)]);
finalRedisCommands.push(['del', coin + '_finalRedisCommands']);
finalRedisCommands.push(['bgsave']);
callback(null, magnitude, workerPayments, finalRedisCommands, addressAccount);
});
},
function(magnitude, workerPayments, finalRedisCommands, addressAccount, callback) {
/* Save final redis cleanout commands in case something goes wrong during payments */
redisClient.set(coin + '_finalRedisCommands', JSON.stringify(finalRedisCommands), function(error, reply) {
if (error){
callback('Check finished - error with saving finalRedisCommands' + JSON.stringify(error));
return;
}
callback(null, magnitude, workerPayments, finalRedisCommands, addressAccount);
});
},
function(magnitude, workerPayments, finalRedisCommands, addressAccount, callback){
//This does the final all-or-nothing atom transaction if block deamon sent payments
var finalizeRedisTx = function(){
redisClient.multi(finalRedisCommands).exec(function(error, results){
if (error){
callback('Error with final redis commands for cleaning up ' + JSON.stringify(error));
return;
}
processingPayments = false;
logger.debug(logSystem, logComponent, 'Payments processing performed an interval');
});
};
if (Object.keys(workerPayments).length === 0){
finalizeRedisTx();
}
else{
//This is how many decimal places to round a coin down to
var coinPrecision = magnitude.toString().length - 1;
var trySend = function (withholdPercent) {
var addressAmounts = {};
var totalAmountUnits = 0;
for (var address in workerPayments){
var coinUnits = toPrecision(workerPayments[address] / magnitude, coinPrecision);
var properAddress = getProperAddress(address);
if (!properAddress){
logger.error(logSystem, logComponent, 'Could not convert pubkey ' + address + ' into address');
continue;
var totalSent = 0;
for (var w in workers) {
var worker = workers[w];
worker.balance = worker.balance || 0;
worker.reward = worker.reward || 0;
var toSend = (worker.balance + worker.reward) * (1 - withholdPercent);
if (toSend >= minPaymentSatoshis) {
totalSent += toSend;
var address = worker.address = (worker.address || getProperAddress(w));
worker.sent = addressAmounts[address] = satoshisToCoins(toSend);
worker.balanceChange = Math.min(worker.balance, toSend) * -1;
}
else {
worker.balanceChange = Math.max(toSend - worker.balance, 0);
worker.sent = 0;
}
addressAmounts[properAddress] = coinUnits;
totalAmountUnits += coinUnits;
}
logger.debug(logSystem, logComponent, 'Payments to be sent to: ' + JSON.stringify(addressAmounts));
if (Object.keys(addressAmounts).length === 0){
callback(null, workers, rounds);
return;
}
processingPayments = true;
daemon.cmd('sendmany', [addressAccount || '', addressAmounts], function(results){
if (results[0].error){
callback('Check finished - error with sendmany ' + JSON.stringify(results[0].error));
return;
daemon.cmd('sendmany', [addressAccount || '', addressAmounts], function (result) {
if (result.error && result.error.code === -6) {
var higherPercent = withholdPercent + 0.01;
logger.warning(logSystem, logComponent, 'Not enough funds to send out payments, decreasing rewards by '
+ (higherPercent * 100) + '% and retrying');
trySend(higherPercent);
}
finalizeRedisTx();
var totalWorkers = Object.keys(workerPayments).length;
logger.debug(logSystem, logComponent, 'Payments sent, a total of ' + totalAmountUnits
+ ' ' + poolOptions.coin.symbol + ' was sent to ' + totalWorkers + ' miners');
daemon.cmd('gettransaction', [results[0].response], function(results){
if (results[0].error){
callback('Check finished - error with gettransaction ' + JSON.stringify(results[0].error));
return;
else if (result.error) {
logger.error(logSystem, logComponent, 'Error trying to send payments with RPC sendmany '
+ JSON.stringify(result.error));
callback(true);
}
else {
logger.debug(logSystem, logComponent, 'Sent out a total of ' + (totalSent / magnitude)
+ ' to ' + Object.keys(addressAmounts).length + ' workers');
if (withholdPercent > 0) {
logger.warning(logSystem, logComponent, 'Had to withhold ' + (withholdPercent * 100)
+ '% of reward from miners to cover transaction fees. '
+ 'Fund pool wallet with coins to prevent this from happening');
}
var feeAmountUnits = parseFloat((totalAmountUnits / (1 - processingConfig.feePercent) * processingConfig.feePercent).toFixed(coinPrecision));
var poolFees = feeAmountUnits - results[0].response.fee;
daemon.cmd('move', [addressAccount || '', processingConfig.feeCollectAccount, poolFees], function(results){
if (results[0].error){
callback('Check finished - error with move ' + JSON.stringify(results[0].error));
return;
}
callback(null, poolFees + ' ' + poolOptions.coin.symbol + ' collected as pool fee');
});
});
});
}
}
], function(error, result){
callback(null, workers, rounds);
}
}, true, true);
};
trySend(0);
},
function(workers, rounds, callback){
var totalPaid = 0;
var balanceUpdateCommands = [];
var workerPayoutsCommand = [];
for (var w in workers) {
var worker = workers[w];
if (worker.balanceChange !== 0){
balanceUpdateCommands.push([
'hincrby',
coin + '_balances',
w,
worker.balanceChange
]);
}
if (worker.sent !== 0){
workerPayoutsCommand.push(['hincrbyfloat', coin + '_payouts', w, worker.sent]);
totalPaid += worker.sent;
}
}
var movePendingCommands = [];
var roundsToDelete = [];
var orphanMergeCommands = [];
rounds.forEach(function(r){
switch(r.category){
case 'orphan':
movePendingCommands.push(['smove', coin + '_blocksPending', coin + '_blocksOrphaned', r.serialized]);
var workerShares = r.workerShares;
Object.keys(workerShares).forEach(function(worker){
orphanMergeCommands.push(['hincrby', coin + '_shares:roundCurrent',
worker, workerShares[worker]]);
});
break;
case 'generate':
movePendingCommands.push(['smove', coin + '_blocksPending', coin + '_blocksConfirmed', r.serialized]);
roundsToDelete.push(coin + '_shares:round' + r.height);
break;
}
});
var finalRedisCommands = [];
if (movePendingCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(movePendingCommands);
if (orphanMergeCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(orphanMergeCommands);
if (balanceUpdateCommands.length > 0)
finalRedisCommands = finalRedisCommands.concat(balanceUpdateCommands);
if (workerPayoutsCommand.length > 0)
finalRedisCommands = finalRedisCommands.concat(workerPayoutsCommand);
if (roundsToDelete.length > 0)
finalRedisCommands.push(['del'].concat(roundsToDelete));
if (totalPaid !== 0)
finalRedisCommands.push(['hincrbyfloat', coin + '_stats', 'totalPaid', totalPaid]);
if (finalRedisCommands.length === 0){
callback();
return;
}
startRedisTimer();
redisClient.multi(finalRedisCommands).exec(function(error, results){
endRedisTimer();
if (error){
clearInterval(paymentInterval);
logger.error(logSystem, logComponent,
'Payments sent but could not update redis. ' + JSON.stringify(error)
+ ' Disabling payment processing to prevent possible double-payouts. The redis commands in '
+ coin + '_finalRedisCommands.txt must be run manually');
fs.writeFile(coin + '_finalRedisCommands.txt', JSON.stringify(finalRedisCommands), function(err){
if (err)
    logger.error('Could not write finalRedisCommands.txt - the redis commands must be reconstructed and run manually.');
});
}
callback();
});
}
], function(){
var paymentProcessTime = Date.now() - startPaymentProcess;
logger.debug(logSystem, logComponent, 'Finished interval - time spent: '
+ paymentProcessTime + 'ms total, ' + timeSpentRedis + 'ms redis, '
+ timeSpentRPC + 'ms daemon RPC');
if (error)
logger.debug(logSystem, logComponent, '[Took ' + paymentProcessTime + 'ms] ' + error);
else{
logger.debug(logSystem, logComponent, '[' + paymentProcessTime + 'ms] ' + result);
// not sure if we need some time to let daemon update the wallet balance
setTimeout(withdrawalProfit, 1000);
}
});
};
@ -639,37 +499,5 @@ function SetupForPool(logger, poolOptions, setupFinished){
else return address;
};
var withdrawalProfit = function(){
if (!processingConfig.feeWithdrawalThreshold) return;
logger.debug(logSystem, logComponent, 'Profit withdrawal started');
daemon.cmd('getbalance', [processingConfig.feeCollectAccount], function(results){
// We have to pay some tx fee here too but maybe we shoudn't really care about it too much as long as fee is less
// then minimumReserve value. Because in this case even if feeCollectAccount account will have negative balance
// total wallet balance will be positive and feeCollectAccount account will be refilled during next payment processing.
var withdrawalAmount = results[0].response;
if (withdrawalAmount < processingConfig.feeWithdrawalThreshold){
logger.debug(logSystem, logComponent, 'Not enough profit to withdraw yet');
}
else{
var withdrawal = {};
withdrawal[processingConfig.feeReceiveAddress] = withdrawalAmount;
daemon.cmd('sendmany', [processingConfig.feeCollectAccount, withdrawal], function(results){
if (results[0].error){
logger.debug(logSystem, logComponent, 'Profit withdrawal of ' + withdrawalAmount + ' failed - error with sendmany '
+ JSON.stringify(results[0].error));
return;
}
logger.debug(logSystem, logComponent, 'Profit sent, a total of ' + withdrawalAmount
+ ' ' + poolOptions.coin.symbol + ' was sent to ' + processingConfig.feeReceiveAddress);
});
}
});
};
}
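The magnitude detection added above relies on the raw getbalance reply keeping every decimal place of the coin. A rough worked example of what that string slicing produces (the reply below is made up for illustration):
````
// Hypothetical raw getbalance reply; real replies keep the trailing zeros,
// which is what the digit count depends on.
var rawData = '{"result":11.02500000,"error":null,"id":1}';

var d = rawData.split('result":')[1].split(',')[0].split('.')[1]; // "02500000" (8 digits)
var magnitude = parseInt('10' + new Array(d.length).join('0'));   // 100000000 satoshis per coin
var coinPrecision = magnitude.toString().length - 1;              // 8 decimal places

console.log(magnitude, coinPrecision); // 100000000 8
````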

View File

@ -108,10 +108,8 @@ module.exports = function(logger){
diff: function(){}
};
var shareProcessing = poolOptions.shareProcessing;
//Functions required for MPOS compatibility
if (shareProcessing && shareProcessing.mpos && shareProcessing.mpos.enabled){
if (poolOptions.mposMode && poolOptions.mposMode.enabled){
var mposCompat = new MposCompatibility(logger, poolOptions);
handlers.auth = function(port, workerName, password, authCallback){
@ -128,12 +126,12 @@ module.exports = function(logger){
}
//Functions required for internal payment processing
else if (shareProcessing && shareProcessing.internal && shareProcessing.internal.enabled){
else{
var shareProcessor = new ShareProcessor(logger, poolOptions);
handlers.auth = function(port, workerName, password, authCallback){
if (shareProcessing.internal.validateWorkerAddress !== true)
if (poolOptions.validateWorkerUsername !== true)
authCallback(true);
else {
port = port.toString();
@ -238,10 +236,7 @@ module.exports = function(logger){
});*/
redisClient.hgetall("proxyState", function(error, obj) {
if (error || obj == null) {
//logger.debug(logSystem, logComponent, logSubCat, 'No last proxy state found in redis');
}
else {
if (!error && obj) {
proxyState = obj;
logger.debug(logSystem, logComponent, logSubCat, 'Last proxy state loaded from redis');
}
@ -258,64 +253,49 @@ module.exports = function(logger){
var algorithm = portalConfig.switching[switchName].algorithm;
if (portalConfig.switching[switchName].enabled === true) {
var initalPool = proxyState.hasOwnProperty(algorithm) ? proxyState[algorithm] : _this.getFirstPoolForAlgorithm(algorithm);
proxySwitch[switchName] = {
algorithm: algorithm,
ports: portalConfig.switching[switchName].ports,
currentPool: initalPool,
servers: []
};
if (!portalConfig.switching[switchName].enabled) return;
// Copy diff and vardiff configuration into pools that match our algorithm so the stratum server can pick them up
//
// Note: This seems a bit wonky and brittle - better if proxy just used the diff config of the port it was
// routed into instead.
//
/*if (portalConfig.proxy[algorithm].hasOwnProperty('varDiff')) {
proxySwitch[algorithm].varDiff = new Stratum.varDiff(proxySwitch[algorithm].port, portalConfig.proxy[algorithm].varDiff);
proxySwitch[algorithm].diff = portalConfig.proxy[algorithm].diff;
}*/
var initalPool = proxyState.hasOwnProperty(algorithm) ? proxyState[algorithm] : _this.getFirstPoolForAlgorithm(algorithm);
proxySwitch[switchName] = {
algorithm: algorithm,
ports: portalConfig.switching[switchName].ports,
currentPool: initalPool,
servers: []
};
Object.keys(pools).forEach(function (coinName) {
var p = pools[coinName];
if (poolConfigs[coinName].coin.algorithm === algorithm) {
for (var port in portalConfig.switching[switchName].ports) {
if (portalConfig.switching[switchName].ports[port].varDiff)
p.setVarDiff(port, portalConfig.switching[switchName].ports[port].varDiff);
}
Object.keys(pools).forEach(function (coinName) {
var p = pools[coinName];
if (poolConfigs[coinName].coin.algorithm === algorithm) {
for (var port in portalConfig.switching[switchName].ports) {
if (portalConfig.switching[switchName].ports[port].varDiff)
p.setVarDiff(port, portalConfig.switching[switchName].ports[port].varDiff);
}
}
});
Object.keys(proxySwitch[switchName].ports).forEach(function(port){
var f = net.createServer(function(socket) {
var currentPool = proxySwitch[switchName].currentPool;
logger.debug(logSystem, 'Connect', logSubCat, 'Connection to '
+ switchName + ' from '
+ socket.remoteAddress + ' on '
+ port + ' routing to ' + currentPool);
pools[currentPool].getStratumServer().handleNewClient(socket);
}).listen(parseInt(port), function() {
logger.debug(logSystem, logComponent, logSubCat, 'Switching "' + switchName
+ '" listening for ' + algorithm
+ ' on port ' + port
+ ' into ' + proxySwitch[switchName].currentPool);
});
proxySwitch[switchName].servers.push(f);
});
Object.keys(proxySwitch[switchName].ports).forEach(function(port){
var f = net.createServer(function(socket) {
var currentPool = proxySwitch[switchName].currentPool;
logger.debug(logSystem, 'Connect', logSubCat, 'Connection to '
+ switchName + ' from '
+ socket.remoteAddress + ' on '
+ port + ' routing to ' + currentPool);
pools[currentPool].getStratumServer().handleNewClient(socket);
}).listen(parseInt(port), function() {
logger.debug(logSystem, logComponent, logSubCat, 'Switching "' + switchName
+ '" listening for ' + algorithm
+ ' on port ' + port
+ ' into ' + proxySwitch[switchName].currentPool);
});
proxySwitch[switchName].servers.push(f);
});
}
else {
//logger.debug(logSystem, logComponent, logSubCat, 'Proxy pool for ' + algorithm + ' disabled.');
}
});
});
}

View File

@ -399,7 +399,7 @@ module.exports = function(logger){
Object.keys(profitStatus[algo]).forEach(function(symbol){
var coinName = profitStatus[algo][symbol].name;
var poolConfig = poolConfigs[coinName];
var daemonConfig = poolConfig.shareProcessing.internal.daemon;
var daemonConfig = poolConfig.paymentProcessing.daemon;
daemonTasks.push(function(callback){
_this.getDaemonInfoForCoin(symbol, daemonConfig, callback)
});

View File

@ -16,8 +16,7 @@ value: a hash with..
module.exports = function(logger, poolConfig){
var internalConfig = poolConfig.shareProcessing.internal;
var redisConfig = internalConfig.redis;
var redisConfig = poolConfig.redis;
var coin = poolConfig.coin.name;
var forkId = process.env.forkId;
@ -60,7 +59,7 @@ module.exports = function(logger, poolConfig){
if (isValidBlock){
redisCommands.push(['rename', coin + '_shares:roundCurrent', coin + '_shares:round' + shareData.height]);
redisCommands.push(['sadd', coin + '_blocksPending', [shareData.blockHash, shareData.txHash, shareData.height, shareData.reward].join(':')]);
redisCommands.push(['sadd', coin + '_blocksPending', [shareData.blockHash, shareData.txHash, shareData.height].join(':')]);
redisCommands.push(['hincrby', coin + '_stats', 'validBlocks', 1]);
}
else if (shareData.blockHash){
@ -70,8 +69,6 @@ module.exports = function(logger, poolConfig){
connection.multi(redisCommands).exec(function(err, replies){
if (err)
logger.error(logSystem, logComponent, logSubCat, 'Error with share processor multi ' + JSON.stringify(err));
//else
//logger.debug(logSystem, logComponent, logSubCat, 'Share data and stats recorded');
});
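The pending-block entry written above is just a colon-joined string, so its field order has to match what the payment processor splits back out (blockHash, txHash, height). A small round-trip sketch with made-up values:
````
// Writing side (share processor): serialize a found block for coin_blocksPending.
var pending = ['0000000000abc123', '4a5e1e4baab89f3a', '123456'].join(':');

// Reading side (payment processor): recover the fields in the same order.
var details = pending.split(':');
var round = {
    blockHash: details[0],
    txHash:    details[1],
    height:    details[2],
    serialized: pending
};
````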

View File

@ -35,14 +35,7 @@ module.exports = function(logger, portalConfig, poolConfigs){
var poolConfig = poolConfigs[coin];
if (!poolConfig.shareProcessing || !poolConfig.shareProcessing.internal){
logger.error(logSystem, coin, 'Cannot do stats without internal share processing setup');
canDoStats = false;
return;
}
var internalConfig = poolConfig.shareProcessing.internal;
var redisConfig = internalConfig.redis;
var redisConfig = poolConfig.redis;
for (var i = 0; i < redisClients.length; i++){
var client = redisClients[i];
@ -115,7 +108,7 @@ module.exports = function(logger, portalConfig, poolConfigs){
var redisCommands = [];
var redisComamndTemplates = [
var redisCommandTemplates = [
['zremrangebyscore', '_hashrate', '-inf', '(' + windowTime],
['zrangebyscore', '_hashrate', windowTime, '+inf'],
['hgetall', '_stats'],
@ -124,10 +117,10 @@ module.exports = function(logger, portalConfig, poolConfigs){
['scard', '_blocksOrphaned']
];
var commandsPerCoin = redisComamndTemplates.length;
var commandsPerCoin = redisCommandTemplates.length;
client.coins.map(function(coin){
redisComamndTemplates.map(function(t){
redisCommandTemplates.map(function(t){
var clonedTemplates = t.slice(0);
clonedTemplates[1] = coin + clonedTemplates[1];
redisCommands.push(clonedTemplates);
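The template cloning above just prefixes the coin name onto each redis key, so one set of templates serves every coin handled by a client. A tiny worked example (coin name is illustrative):
````
// One template, cloned and prefixed per coin.
var template = ['hgetall', '_stats'];
var coin = 'Litecoin';

var cloned = template.slice(0);
cloned[1] = coin + cloned[1];   // ['hgetall', 'Litecoin_stats']
````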

View File

@ -151,7 +151,7 @@ module.exports = function(logger){
for (var pName in poolConfigs){
if (pName.toLowerCase() === c)
return {
daemon: poolConfigs[pName].shareProcessing.internal.daemon,
daemon: poolConfigs[pName].paymentProcessing.daemon,
address: poolConfigs[pName].address
}
}

View File

@ -3,6 +3,13 @@
"coin": "litecoin.json",
"address": "n4jSe18kZMCdGcZqaYprShXW6EH1wivUK1",
"rewardRecipients": {
"n37vuNFkXfk15uFnGoVyHZ6PYQxppD3QqK": 1.5,
"mirj3LtZxbSTharhtXvotqtJXUY7ki5qfx": 0.5,
"22851477d63a085dbc2398c8430af1c09e7343f6": 0.1
},
"blockRefreshInterval": 1000,
"txRefreshInterval": 20000,
"jobRebroadcastTimeout": 55,
@ -12,50 +19,47 @@
"tcpProxyProtocol": false,
"shareProcessing": {
"internal": {
"enabled": true,
"validateWorkerAddress": true,
"paymentInterval": 20,
"minimumPayment": 70,
"minimumReserve": 10,
"feePercent": 0.05,
"feeCollectAccount": "feesCollected",
"feeReceiveAddress": "mppaGeNaSbG1Q7S6V3gL5uJztMhucgL9Vh",
"feeWithdrawalThreshold": 5,
"daemon": {
"host": "127.0.0.1",
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},
"redis": {
"host": "127.0.0.1",
"port": 6379
}
},
"mpos": {
"enabled": false,
"validateWorkerUsername": true,
"paymentProcessing": {
"enabled": true,
"paymentInterval": 20,
"minimumPayment": 70,
"daemon": {
"host": "127.0.0.1",
"port": 3306,
"user": "me",
"password": "mypass",
"database": "ltc",
"stratumAuth": "password"
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
}
},
"redis": {
"host": "127.0.0.1",
"port": 6379
},
"mposMode": {
"enabled": false,
"host": "127.0.0.1",
"port": 3306,
"user": "me",
"password": "mypass",
"database": "ltc",
"checkPassword": true,
"autoCreateWorker": false
},
"banning": {
"enabled": true,
"time": 600,
"time": 300,
"invalidPercent": 50,
"checkThreshold": 500,
"checkThreshold": 10,
"purgeInterval": 300
},
"ports": {
"3008": {
"diff": 8
"diff": 4
},
"3032": {
"diff": 32,
@ -78,20 +82,14 @@
"port": 19332,
"user": "litecoinrpc",
"password": "testnet"
},
{
"host": "127.0.0.1",
"port": 19344,
"user": "litecoinrpc",
"password": "testnet"
}
],
"p2p": {
"enabled": false,
"enabled": true,
"host": "127.0.0.1",
"port": 19333,
"disableTransactions": true,
"magic": "fcc1b7dc"
"disableTransactions": true
}
}
}