Node Socket.IO/Apache doesn't free memory after shutdown
I have a fairly standard setup with multiple cluster workers. When it reaches around 10K concurrent WebSocket connections, memory usage climbs to about 99% and the app shuts down due to low memory.
The issue is that if I kill the Node app's background job, the memory is not freed. I have to restart the Apache service (httpd) to free it.
I'm running the app with node --expose_gc app.js and triggering garbage collection manually on a 1000 ms interval. It's a Socket.IO Node app using the websocket transport. I implemented the cluster example from the official docs; nothing changed.
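The manual GC is essentially just an interval calling global.gc() (simplified sketch, not the exact code):

// Manual GC loop; only does anything when started with node --expose_gc
if (typeof global.gc === "function") {
  setInterval(() => {
    global.gc();
  }, 1000);
}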
The server is CentOS 7 with Apache as a reverse proxy in front of the clustered Node processes. My setup has around 25 GB of RAM free for Socket.IO's use.
Any ideas on how to free the memory? The service is shut down, nothing gets saved, and I'm not keeping anything in long-lived variables.
Apache's config
SSLEngine on
ProxyRequests off
ProxyPass "/websocket/socket" balancer://nodes_ws/
ProxyPassReverse "/websocket/socket" balancer://nodes_ws/
ProxyTimeout 3
Header add Set-Cookie "BlazocketServer=sticky.%{BALANCER_WORKER_ROUTE}e; path=/" env=BALANCER_ROUTE_CHANGED
<Proxy "balancer://nodes_polling">
BalancerMember "https://localhost:3000" route=app01
BalancerMember "https://localhost:3001" route=app02
BalancerMember "https://localhost:3002" route=app03
ProxySet stickysession=BlazocketServer
</Proxy>
<Proxy "balancer://nodes_ws">
BalancerMember "ws://localhost:3000" route=app01
BalancerMember "ws://localhost:3001" route=app02
BalancerMember "ws://localhost:3002" route=app03
ProxySet stickysession=BlazocketServer
</Proxy>
RewriteEngine On
#RewriteCond %{QUERY_STRING} transport=polling
#RewriteRule /(.*)$ http://localhost:3000/$1 [P]
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteRule /(.*) balancer://nodes_ws/$1 [P,L]
RewriteCond %{QUERY_STRING} transport=polling
RewriteRule /(.*) balancer://nodes_polling/$1 [P,L]
<IfModule mpm_prefork_module>
ServerLimit 10000
StartServers 600
MinSpareServers 100
MaxSpareServers 500
MaxClients 10000
MaxConnectionsPerChild 10000
#MinSpareThreads 4500
#MaxSpareThreads 9000
#ThreadLimit 640
#ThreadsPerChild 3000
#MaxRequestWorkers 5120
#MaxRequestsPerChild 20000
</IfModule>
Socket.IO app.js
import express from "express";
import * as http from "http";
import { Server } from "socket.io";
import cluster from "cluster";
import os from "os";
import { setupMaster, setupWorker } from "@socket.io/sticky";
import { createAdapter, setupPrimary } from "@socket.io/cluster-adapter";

const app = express();
const httpServer = http.createServer(app);

http.globalAgent.maxSockets = Infinity;
httpServer.maxRequestsPerSocket = Infinity;

const numCPUs = os.cpus().length;

if (cluster.isMaster) {
  console.log(`Master ${process.pid} is running`);

  // Set up the master process for sticky sessions
  setupMaster(httpServer, {
    loadBalancingMethod: "least-connection",
  });

  // Set up the primary process for the cluster adapter
  setupPrimary();

  cluster.setupMaster({
    serialization: "advanced",
  });

  // Listen on port 3000
  httpServer.listen(3000, () => {
    console.log("Started from master");
  });

  // Fork one worker per CPU
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork();
  }

  // Report any dead worker and fork a replacement
  cluster.on("exit", (worker) => {
    console.log(`Worker ${worker.process.pid} died`);
    cluster.fork();
  });
} else {
  console.log(`Worker ${process.pid} started`);

  // Set up the Socket.IO instance
  const io = new Server(httpServer, {});

  // Connect the cluster adapter and the sticky-session worker
  io.adapter(createAdapter());
  setupWorker(io);

  // Handle incoming connections
  io.on("connection", (socket) => {
    console.log(`#CONNECTION: ${socket.id} connected`);
  });
}
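A minimal per-worker memory check, for anyone reproducing this (sketch only, not part of the actual app):

// Rough per-worker memory log for testing
setInterval(() => {
  const { rss, heapUsed } = process.memoryUsage();
  console.log(
    `PID ${process.pid}: rss=${(rss / 1048576).toFixed(1)} MB, ` +
    `heapUsed=${(heapUsed / 1048576).toFixed(1)} MB`
  );
}, 10000);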
Tags: javascript, node.js, apache, socket.io