Added interval for spawning pool forks

Matt 2014-03-23 00:46:59 -06:00
parent ad1f4ce3d0
commit 14cd1d0070
4 changed files with 24 additions and 18 deletions

init.js
View File

@@ -3,7 +3,7 @@ var os = require('os');
var cluster = require('cluster');
var async = require('async');
var posix = require('posix');
var PoolLogger = require('./libs/logUtil.js');
var BlocknotifyListener = require('./libs/blocknotifyListener.js');
@@ -112,19 +112,25 @@ var spawnPoolWorkers = function(portalConfig, poolConfigs){
workerType : 'pool',
forkId : forkId,
pools : serializedConfigs,
portalConfig : JSON.stringify(portalConfig),
portalConfig : JSON.stringify(portalConfig)
});
worker.on('exit', function(code, signal){
logger.error('Master', 'Pool Worker', 'Fork ' + forkId + ' died, spawning replacement worker...');
logger.error('Master', 'PoolSpawner', 'Fork ' + forkId + ' died, spawning replacement worker...');
setTimeout(function(){
createPoolWorker(forkId);
}, 2000);
});
};
for (var i = 0; i < numForks; i++) {
var i = 0;
var spawnInterval = setInterval(function(){
createPoolWorker(i);
}
i++;
if (i === numForks){
clearInterval(spawnInterval);
logger.debug('Master', 'PoolSpawner', 'Spawned pools for all ' + numForks + ' configured forks');
}
}, 250);
};
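Reassembled from the added lines above, the new spawn logic reads as follows (createPoolWorker, numForks and logger are the names already in scope in init.js); each fork is started 250 ms after the previous one instead of all at once in a for loop:

// Staggered fork spawning as introduced by this commit.
var i = 0;
var spawnInterval = setInterval(function(){
    createPoolWorker(i);    // fork the next pool worker
    i++;
    if (i === numForks){
        clearInterval(spawnInterval);   // stop once every configured fork exists
        logger.debug('Master', 'PoolSpawner', 'Spawned pools for all ' + numForks + ' configured forks');
    }
}, 250);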

View File

@@ -80,11 +80,11 @@ function SetupForPool(logger, poolOptions){
if (error){
logger.error(logSystem, logComponent, 'Could not get blocks from redis ' + JSON.stringify(error));
callback('check finished - redis error for getting blocks');
callback('Check finished - redis error for getting blocks');
return;
}
if (results.length === 0){
callback('check finished - no pending blocks in redis');
callback('Check finished - no pending blocks in redis');
return;
}
@@ -115,7 +115,7 @@ function SetupForPool(logger, poolOptions){
daemon.batchCmd(batchRPCcommand, function(error, txDetails){
if (error || !txDetails){
callback('check finished - daemon rpc error with batch gettransactions ' + JSON.stringify(error));
callback('Check finished - daemon rpc error with batch gettransactions ' + JSON.stringify(error));
return;
}
@@ -162,7 +162,7 @@ function SetupForPool(logger, poolOptions){
if (rounds.length === 0){
callback('check finished - no confirmed or orphaned blocks found');
callback('Check finished - no confirmed or orphaned blocks found');
}
else{
callback(null, rounds, magnitude);
@@ -183,7 +183,7 @@ function SetupForPool(logger, poolOptions){
redisClient.multi(shareLookups).exec(function(error, allWorkerShares){
if (error){
callback('check finished - redis error with multi get rounds share')
callback('Check finished - redis error with multi get rounds share')
return;
}
@@ -228,7 +228,7 @@ function SetupForPool(logger, poolOptions){
redisClient.hmget([coin + '_balances'].concat(workers), function(error, results){
if (error && workers.length !== 0){
callback('check finished - redis error with multi get balances ' + JSON.stringify(error));
callback('Check finished - redis error with multi get balances ' + JSON.stringify(error));
return;
}
@@ -297,7 +297,7 @@ function SetupForPool(logger, poolOptions){
var minReserveSatoshis = processingConfig.minimumReserve * magnitude;
if (balanceLeftOver < minReserveSatoshis){
callback('check finished - payments would wipe out minimum reserve, tried to pay out ' + toBePaid +
callback('Check finished - payments would wipe out minimum reserve, tried to pay out ' + toBePaid +
' but only have ' + totalBalance + '. Left over balance would be ' + balanceLeftOver +
', needs to be at least ' + minReserveSatoshis);
return;
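A quick worked example of the reserve check above, assuming balanceLeftOver is simply totalBalance minus toBePaid (which matches the wording of the error message) and using illustrative numbers:

// Illustrative numbers only - magnitude comes from the daemon and
// minimumReserve from the pool's payment processing config.
var magnitude = 100000000;                      // 1 coin = 1e8 satoshis (assumed)
var processingConfig = { minimumReserve: 10 };  // hypothetical config value
var totalBalance = 2500000000;                  // 25 coins held by the wallet
var toBePaid = 2000000000;                      // 20 coins owed to miners

var balanceLeftOver = totalBalance - toBePaid;                        // 500000000 (5 coins)
var minReserveSatoshis = processingConfig.minimumReserve * magnitude; // 1000000000 (10 coins)

// 5 coins < 10 coins, so this run would abort with the
// 'payments would wipe out minimum reserve' callback above.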
@@ -349,7 +349,7 @@ function SetupForPool(logger, poolOptions){
var finalizeRedisTx = function(){
redisClient.multi(finalRedisCommands).exec(function(error, results){
if (error){
callback('check finished - error with final redis commands for cleaning up ' + JSON.stringify(error));
callback('Check finished - error with final redis commands for cleaning up ' + JSON.stringify(error));
return;
}
callback(null, 'Payments processing performed an interval');
@@ -373,7 +373,7 @@ function SetupForPool(logger, poolOptions){
logger.debug(logSystem, logComponent, 'Payments about to be sent to: ' + JSON.stringify(addressAmounts));
daemon.cmd('sendmany', ['', addressAmounts], function(results){
if (results[0].error){
callback('check finished - error with sendmany ' + JSON.stringify(results[0].error));
callback('Check finished - error with sendmany ' + JSON.stringify(results[0].error));
return;
}
finalizeRedisTx();
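The ordering above is the important part: the redis cleanup in finalizeRedisTx only runs once the daemon has accepted the sendmany. Condensed, using only names visible in these hunks:

// Condensed view of the send-then-finalize flow shown above.
daemon.cmd('sendmany', ['', addressAmounts], function(results){
    if (results[0].error){
        // abort before touching redis so pending rounds and balances are kept
        callback('Check finished - error with sendmany ' + JSON.stringify(results[0].error));
        return;
    }
    finalizeRedisTx(); // presumably removes paid rounds and resets balances in redis
});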

View File

@@ -120,10 +120,10 @@ module.exports = function(logger){
if (data.solution && !isValidBlock)
logger.debug(logSystem, logComponent, logSubCat, 'We thought a block solution was found but it was rejected by the daemon, share data: ' + shareData);
else if (isValidBlock)
logger.debug(logSystem, logComponent, logSubCat, 'Block found, solution: ' + data.solution);
logger.debug(logSystem, logComponent, logSubCat, 'Block solution found: ' + data.solution);
if (isValidShare)
logger.debug(logSystem, logComponent, logSubCat, 'Valid share submitted, share data: ' + shareData);
logger.debug(logSystem, logComponent, logSubCat, 'Valid share diff of ' + data.difficulty + ' submitted by worker ' + data.worker + ' [' + data.ip + ']');
else if (!isValidShare)
logger.debug(logSystem, logComponent, logSubCat, 'Invalid share submitted, share data: ' + shareData)
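For context, these log calls presumably sit inside a share event handler; a rough sketch of that surrounding shape, with the handler signature and shareData assumed rather than shown in this diff, is:

// Assumed wrapper - only data.solution, data.difficulty, data.worker and
// data.ip are confirmed by the diff above.
pool.on('share', function(isValidShare, isValidBlock, data){
    var shareData = JSON.stringify(data);
    if (data.solution && !isValidBlock)
        logger.debug(logSystem, logComponent, logSubCat, 'We thought a block solution was found but it was rejected by the daemon, share data: ' + shareData);
    else if (isValidBlock)
        logger.debug(logSystem, logComponent, logSubCat, 'Block solution found: ' + data.solution);
    if (isValidShare)
        logger.debug(logSystem, logComponent, logSubCat, 'Valid share diff of ' + data.difficulty + ' submitted by worker ' + data.worker + ' [' + data.ip + ']');
    else
        logger.debug(logSystem, logComponent, logSubCat, 'Invalid share submitted, share data: ' + shareData);
});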

View File

@@ -77,9 +77,9 @@ module.exports = function(logger, poolConfig){
connection.multi(redisCommands).exec(function(err, replies){
if (err)
logger.error(logSystem, 'redis', 'error with share processor multi ' + JSON.stringify(err));
logger.error(logSystem, 'redis', 'Error with share processor multi ' + JSON.stringify(err));
else
logger.debug(logSystem, 'redis', 'share related data recorded');
logger.debug(logSystem, 'redis', 'Share data and stats recorded');
});
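The redisCommands array executed here is assembled earlier in the share processor and is not part of this commit; a hypothetical minimal version, using the standard node_redis multi/exec API, could look like:

// Hypothetical example only - the real keys and fields are built elsewhere
// in the share processor, not in this diff; coin and shareData are assumed
// to be in scope.
var redisCommands = [
    // credit this worker's share toward the current round (key/field names assumed)
    ['hincrbyfloat', coin + '_shares:roundCurrent', shareData.worker, shareData.difficulty],
    // bump a pool-wide valid share counter (hypothetical stat key)
    ['hincrby', coin + '_stats', 'validShares', 1]
];
connection.multi(redisCommands).exec(function(err, replies){
    if (err)
        logger.error(logSystem, 'redis', 'Error with share processor multi ' + JSON.stringify(err));
    else
        logger.debug(logSystem, 'redis', 'Share data and stats recorded');
});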