Update init.js

Added PPLNT time share tracking to the master process for thread safety.
Added a redis connection in init.js for PPLNT time share tracking.
hellcatz 2017-05-06 17:41:36 -07:00 committed by GitHub
parent e6c4bf4c10
commit 6ea1df2cd5
1 changed file with 69 additions and 7 deletions
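For context, a minimal sketch of the worker-side counterpart this commit's handler expects (not part of this diff; the exact field names are an assumption): each pool worker forwards share events to the master over cluster IPC, so only the single master process ever writes PPLNT times to redis.

// Hypothetical worker-side message (field names assumed, not from this commit):
// the master's 'shareTrack' case below consumes msg.coin, msg.thread,
// msg.isValidShare, msg.isValidBlock and msg.data.worker.
process.send({
    type: 'shareTrack',
    thread: forkId,
    coin: poolOptions.coin.name,
    isValidShare: isValidShare,
    isValidBlock: isValidBlock,
    data: shareData
});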

init.js

@@ -6,6 +6,8 @@ var cluster = require('cluster');
var async = require('async');
var extend = require('extend');
var redis = require('redis');
var PoolLogger = require('./libs/logUtil.js');
var CliListener = require('./libs/cliListener.js');
var PoolWorker = require('./libs/poolWorker.js');
@@ -31,9 +33,6 @@ var logger = new PoolLogger({
logColors: portalConfig.logColors
});
try {
require('newrelic');
if (cluster.isMaster)
@@ -66,7 +65,6 @@ catch(e){
logger.debug('POSIX', 'Connection Limit', '(Safe to ignore) POSIX module not installed and resource (connection) limit was not raised');
}
if (cluster.isWorker){
switch(process.env.workerType){
@@ -179,16 +177,36 @@ var buildPoolConfigs = function(){
return configs;
};
function roundTo(n, digits) {
if (digits === undefined) {
digits = 0;
}
var multiplicator = Math.pow(10, digits);
n = parseFloat((n * multiplicator).toFixed(11));
var test = (Math.round(n) / multiplicator);
return +(test.toFixed(digits));
}
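// Illustrative examples (not part of the commit): the toFixed(11) pre-round
// above guards against IEEE-754 artifacts before the final rounding, e.g.
//   roundTo(1.005, 2)  -> 1.01   (naive Math.round(1.005 * 100) / 100 gives 1,
//                                 because 1.005 * 100 === 100.49999999999999)
//   roundTo(123.4567)  -> 123    (digits defaults to 0)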
var _lastStartTimes = {};
var _lastShareTimes = {};
var spawnPoolWorkers = function(){
var redisConfig;
var connection;
Object.keys(poolConfigs).forEach(function(coin){
var pcfg = poolConfigs[coin];
if (!Array.isArray(pcfg.daemons) || pcfg.daemons.length < 1){
logger.error('Master', coin, 'No daemons configured so a pool cannot be started for this coin.');
delete poolConfigs[coin];
} else if (!connection) {
redisConfig = pcfg.redis;
connection = redis.createClient(redisConfig.port, redisConfig.host);
connection.on('ready', function(){
logger.debug('PPLNT', coin, 'TimeShare processing setup with redis (' + redisConfig.host +
':' + redisConfig.port + ')');
});
}
});
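One caveat this hunk leaves implicit: node_redis clients emit 'error' events, and an unhandled 'error' event will crash the process. A minimal hardening sketch, assuming the same connection client (not part of this commit):

connection.on('error', function(err){
    logger.error('PPLNT', 'Redis', 'Time share redis error: ' + JSON.stringify(err));
});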
@@ -236,6 +254,50 @@ var spawnPoolWorkers = function(){
}
});
break;
case 'shareTrack':
// pplnt time share tracking of workers
if (msg.isValidShare && !msg.isValidBlock) {
var now = Date.now();
var lastShareTime = now;
var lastStartTime = now;
var workerAddress = msg.data.worker.split('.')[0];
// did they just join in this round?
if (!_lastShareTimes[workerAddress] || !_lastStartTimes[workerAddress]) {
_lastShareTimes[workerAddress] = now;
_lastStartTimes[workerAddress] = now;
}
if (_lastShareTimes[workerAddress] != null && _lastShareTimes[workerAddress] > 0) {
lastShareTime = _lastShareTimes[workerAddress];
lastStartTime = _lastStartTimes[workerAddress];
}
var redisCommands = [];
// if it's been less than 10 minutes since the last share was submitted
var timeChangeSec = roundTo(Math.max(now - lastShareTime, 0) / 1000, 4);
var timeChangeTotal = roundTo(Math.max(now - lastStartTime, 0) / 1000, 4);
if (timeChangeSec < 600) {
// loyal miner keeps mining :)
redisCommands.push(['hincrbyfloat', msg.coin + ':shares:timesCurrent', workerAddress, timeChangeSec]);
logger.debug('PPLNT', msg.coin, 'Thread '+msg.thread, workerAddress+':{totalTimeSec:'+timeChangeTotal+', timeChangeSec:'+timeChangeSec+'}');
connection.multi(redisCommands).exec(function(err, replies){
if (err)
logger.error('PPLNT', msg.coin, 'Thread '+msg.thread, 'Error with time share processor call to redis ' + JSON.stringify(err));
});
} else {
// they just re-joined the pool
_lastStartTimes[workerAddress] = now;
}
// track last time share
_lastShareTimes[workerAddress] = now;
}
if (msg.isValidBlock) {
// reset pplnt share times for next round
_lastShareTimes = {};
_lastStartTimes = {};
}
break;
}
});
};
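Downstream, the accumulated per-round totals can be read back with a plain hash fetch. A hypothetical read-side sketch (the coin key and usage are assumptions, not part of this commit):

// 'timesCurrent' is a redis hash of workerAddress -> seconds mined this round,
// built up by the hincrbyfloat calls above. 'litecoin' is a placeholder coin key.
connection.hgetall('litecoin:shares:timesCurrent', function(err, times){
    if (err) {
        logger.error('PPLNT', 'litecoin', 'Master', 'Could not read time shares: ' + JSON.stringify(err));
        return;
    }
    // times looks like { 'LWorkerAddress1': '542.1234', ... }
});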