I am investigating options for running Node.js in a multi-core environment.
I'm trying to determine the best approach, and so far I've seen these options:
We use Supervisor to manage our Node.js processes: to start them on boot and to act as a watchdog in case a process crashes.
We use Nginx as a reverse proxy to load-balance traffic between the processes, which listen on different ports;
this way each process is isolated from the others.
For example: Nginx listens on port 80 and forwards traffic to ports 8000-8003.
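As a purely illustrative sketch (not the actual configuration from this setup), the Nginx side of such a layout could look roughly like the following; the upstream name node_app and the localhost addresses are assumptions, and the ports match the 8000-8003 example above:

# Illustrative Nginx reverse-proxy configuration (names and addresses assumed)
upstream node_app {
    # one entry per Node.js process from the example above
    server 127.0.0.1:8000;
    server 127.0.0.1:8001;
    server 127.0.0.1:8002;
    server 127.0.0.1:8003;
}

server {
    listen 80;

    location / {
        # forward incoming traffic to the pool of Node.js processes
        proxy_pass http://node_app;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}

Nginx distributes requests round-robin across the upstream servers by default, so each Node.js process receives a share of the traffic while remaining isolated in its own process.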
I was using PM2 for quite a while, but their pricing is too expensive for my needs: I have my own analytics environment and I don't require support, so I decided to experiment with alternatives. In my case, forever did the trick, and it's actually very simple:
forever -m 5 app.js
Another useful example is:
forever start app.js -p 8080
I've been using the built-in cluster module, and it works very well. I've had over 10,000 concurrent users (multiple clusters on multiple servers) without any problems.
It is suggested to use cluster together with domain for error handling.
The code below is lifted straight from http://nodejs.org/api/domain.html. I've made some changes to how it spawns a new worker for each core of your machine, got rid of the if/else, and added Express.
var cluster = require('cluster'),
    http = require('http'),
    PORT = process.env.PORT || 1337,
    os = require('os'),
    server;

function forkClusters () {
    var cpuCount = os.cpus().length;
    // Create a worker for each CPU
    for (var i = 0; i < cpuCount; i += 1) {
        cluster.fork();
    }
}

// Master process
if (cluster.isMaster) {
    // You can also of course get a bit fancier about logging, and
    // implement whatever custom logic you need to prevent DoS
    // attacks and other bad behavior.
    //
    // See the options in the cluster documentation.
    //
    // The important thing is that the master does very little,
    // increasing our resilience to unexpected errors.
    forkClusters();

    cluster.on('disconnect', function (worker) {
        console.error('disconnect!');
        cluster.fork();
    });
}

function handleError (d) {
    d.on('error', function (er) {
        console.error('error', er.stack);
        // Note: we're in dangerous territory!
        // By definition, something unexpected occurred,
        // which we probably didn't want.
        // Anything can happen now! Be very careful!
        try {
            // Make sure we close down within 30 seconds
            var killtimer = setTimeout(function () {
                process.exit(1);
            }, 30000);
            // But don't keep the process open just for that!
            killtimer.unref();

            // Stop taking new requests.
            server.close();

            // Let the master know we're dead. This will trigger a
            // 'disconnect' in the cluster master, and then it will fork
            // a new worker.
            cluster.worker.disconnect();
        } catch (er2) {
            // Oh well, not much we can do at this point.
            console.error('Error shutting down!', er2.stack);
        }
    });
}

// Worker process
if (cluster.isWorker) {
    // The worker
    //
    // This is where we put our bugs!
    var domain = require('domain');
    var express = require('express');
    var app = express();
    app.set('port', PORT);

    // See the cluster documentation for more details about using
    // worker processes to serve requests. How it works, caveats, etc.
    var d = domain.create();
    handleError(d);

    // Now run the handler function in the domain.
    //
    // Put all code here. Any code placed outside of domain.run will not
    // have its errors handled at the domain level, but will crash the app.
    d.run(function () {
        // This is where we start our server
        server = http.createServer(app).listen(app.get('port'), function () {
            console.log('Cluster %s listening on port %s', cluster.worker.id, app.get('port'));
        });
    });
}