Node.js Applications
Monitor Node.js application logs from Express.js, Fastify, NestJS, and custom applications using the File Stream plugin.
Node.js Applications Integration
Monitor and analyze Node.js application logs in real-time using LogFlux Agent’s File Stream plugin. This configuration-based approach provides comprehensive log parsing, JavaScript application analytics, and modern web framework monitoring.
Overview
The Node.js Applications integration uses LogFlux Agent’s File Stream plugin to provide:
- Real-time monitoring of Node.js application logs, error logs, and access logs
- Framework support for Express.js, Fastify, NestJS, and custom applications
- Performance monitoring with request processing, memory usage, and event loop metrics
- Error tracking with stack traces, uncaught exceptions, and promise rejections
- Microservice monitoring for distributed Node.js architectures
- PM2 integration for process management and cluster monitoring
Installation
The File Stream plugin is included with LogFlux Agent. Enable it for Node.js log monitoring:
```bash
# Enable File Stream plugin
sudo systemctl enable --now logflux-filestream

# Verify plugin status
sudo systemctl status logflux-filestream
```
Node.js Logging Setup
Configure structured logging in your Node.js applications:
Winston Logger Configuration
```javascript
const winston = require('winston');

const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.errors({ stack: true }),
    winston.format.json()
  ),
  defaultMeta: {
    service: 'my-node-app',
    version: process.env.APP_VERSION || '1.0.0'
  },
  transports: [
    // Write all logs to the console
    new winston.transports.Console({
      format: winston.format.combine(
        winston.format.colorize(),
        winston.format.simple()
      )
    }),
    // Write error-level logs to a dedicated file
    new winston.transports.File({
      filename: '/var/log/nodejs/error.log',
      level: 'error'
    }),
    // Write all logs to the combined file
    new winston.transports.File({
      filename: '/var/log/nodejs/combined.log'
    })
  ]
});

module.exports = logger;
```
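With this module in place, application code imports the logger and emits structured events; a brief usage sketch (field names are illustrative):

```javascript
const logger = require('./logger');

// Structured fields become JSON keys in /var/log/nodejs/combined.log
logger.info('User created', { userId: 123, plan: 'pro' });

// Errors logged directly keep their stack traces thanks to format.errors({ stack: true })
logger.error(new Error('Payment failed'));
```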
Pino Logger Configuration
```javascript
const pino = require('pino');

const logger = pino({
  level: process.env.LOG_LEVEL || 'info',
  timestamp: pino.stdTimeFunctions.isoTime,
  formatters: {
    level: (label) => {
      return { level: label };
    }
  },
  serializers: {
    req: pino.stdSerializers.req,
    res: pino.stdSerializers.res,
    err: pino.stdSerializers.err
  }
}, pino.destination('/var/log/nodejs/app.log'));

module.exports = logger;
```
Basic Configuration
Configure the File Stream plugin to monitor Node.js logs by creating /etc/logflux-agent/plugins/filestream-nodejs.toml:
```toml
[filestream.nodejs_application]
paths = ["/var/log/nodejs/*.log"]
format = "json"
tags = ["nodejs", "application", "javascript"]
fields = { service = "nodejs", log_type = "application" }

[filestream.nodejs_access]
paths = ["/var/log/nodejs/access.log"]
format = "json"
tags = ["nodejs", "access", "web"]
fields = { service = "nodejs", log_type = "access" }

[filestream.nodejs_error]
paths = ["/var/log/nodejs/error.log"]
format = "json"
tags = ["nodejs", "error", "javascript"]
fields = { service = "nodejs", log_type = "error" }
```
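After adding or changing a plugin configuration file, restart the File Stream plugin so it picks up the new sources:

```bash
sudo systemctl restart logflux-filestream
```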
Framework-Specific Configurations
Express.js Application Monitoring
```toml
[filestream.nodejs_express]
paths = ["/var/log/nodejs/express*.log"]
format = "json"
tags = ["nodejs", "express", "web-framework"]
fields = { service = "nodejs", framework = "express", log_type = "application" }

# Morgan access log format for Express
[filestream.nodejs_express_access]
paths = ["/var/log/nodejs/express-access.log"]
format = "regex"
regex = '^(?P<remote_addr>\S+) - (?P<remote_user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<request_method>\S+) (?P<request_uri>\S+) (?P<request_protocol>[^"]*)" (?P<status>\d+) (?P<response_size>\S+) "(?P<referer>[^"]*)" "(?P<user_agent>[^"]*)" (?P<response_time>\d+)ms$'
parse_timestamp = true
timestamp_field = "timestamp"
timestamp_format = "02/Jan/2006:15:04:05 -0700"
```
Express.js logging setup:
```javascript
const express = require('express');
const morgan = require('morgan');
const winston = require('./logger');
const fs = require('fs');
const path = require('path');

const app = express();

// Create access log stream
const accessLogStream = fs.createWriteStream(
  path.join('/var/log/nodejs', 'express-access.log'),
  { flags: 'a' }
);

// Access log format: 'combined' plus the trailing response time expected by
// the filestream regex above
const accessFormat = ':remote-addr - :remote-user [:date[clf]] ":method :url HTTP/:http-version" :status :res[content-length] ":referrer" ":user-agent" :response-time[0]ms';

// Morgan middleware for access logs
app.use(morgan(accessFormat, { stream: accessLogStream }));
app.use(morgan('dev')); // Console logging

// Custom request logging middleware
app.use((req, res, next) => {
  const start = Date.now();
  res.on('finish', () => {
    const duration = Date.now() - start;
    winston.info('HTTP Request', {
      method: req.method,
      url: req.url,
      status: res.statusCode,
      duration: duration,
      userAgent: req.get('User-Agent'),
      ip: req.ip
    });
  });
  next();
});
```
Fastify Application Monitoring
```toml
[filestream.nodejs_fastify]
paths = ["/var/log/nodejs/fastify*.log"]
format = "json"
tags = ["nodejs", "fastify", "web-framework"]
fields = { service = "nodejs", framework = "fastify", log_type = "application" }
```
Fastify logging setup:
```javascript
const fastify = require('fastify')({
  logger: {
    level: 'info',
    file: '/var/log/nodejs/fastify.log',
    serializers: {
      req: (req) => ({
        method: req.method,
        url: req.url,
        headers: req.headers,
        hostname: req.hostname,
        remoteAddress: req.ip,
        remotePort: req.socket.remotePort
      }),
      res: (res) => ({
        statusCode: res.statusCode,
        headers: res.headers
      })
    }
  }
});

// Custom error handler
fastify.setErrorHandler((error, request, reply) => {
  fastify.log.error(error, 'Unhandled error');
  reply.status(500).send({ error: 'Internal Server Error' });
});
```
NestJS Application Monitoring
```toml
[filestream.nodejs_nestjs]
paths = ["/var/log/nodejs/nestjs*.log"]
format = "json"
tags = ["nodejs", "nestjs", "typescript", "web-framework"]
fields = { service = "nodejs", framework = "nestjs", log_type = "application" }
```
NestJS logging setup:
```typescript
import { NestFactory } from '@nestjs/core';
import { WinstonModule } from 'nest-winston';
import * as winston from 'winston';
import { AppModule } from './app.module';

const logger = WinstonModule.createLogger({
  transports: [
    new winston.transports.File({
      filename: '/var/log/nodejs/nestjs-error.log',
      level: 'error',
      format: winston.format.combine(
        winston.format.timestamp(),
        winston.format.json()
      )
    }),
    new winston.transports.File({
      filename: '/var/log/nodejs/nestjs.log',
      format: winston.format.combine(
        winston.format.timestamp(),
        winston.format.json()
      )
    })
  ]
});

// Bootstrap application with the Winston-backed logger
async function bootstrap() {
  const app = await NestFactory.create(AppModule, { logger });
  await app.listen(3000);
}
bootstrap();
```
Advanced Configuration
PM2 Process Management Integration
Monitor PM2-managed Node.js processes:
```toml
[filestream.nodejs_pm2]
paths = ["/home/nodejs/.pm2/logs/*.log"]
format = "regex"
regex = '^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}): (?P<message>.*)$'
parse_timestamp = true
timestamp_field = "timestamp"
timestamp_format = "2006-01-02 15:04:05"
tags = ["nodejs", "pm2", "process-manager"]
fields = { service = "nodejs", log_type = "pm2" }

[filestream.nodejs_pm2_error]
paths = ["/home/nodejs/.pm2/logs/*-error-*.log"]
format = "text"
tags = ["nodejs", "pm2", "error"]
fields = { service = "nodejs", log_type = "pm2_error" }
```
PM2 ecosystem configuration:
```json
{
  "apps": [{
    "name": "my-node-app",
    "script": "./app.js",
    "instances": "max",
    "exec_mode": "cluster",
    "log_file": "/var/log/nodejs/pm2-combined.log",
    "out_file": "/var/log/nodejs/pm2-out.log",
    "error_file": "/var/log/nodejs/pm2-error.log",
    "log_date_format": "YYYY-MM-DD HH:mm:ss Z",
    "merge_logs": true,
    "env": {
      "NODE_ENV": "production",
      "LOG_LEVEL": "info"
    }
  }]
}
```
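Assuming the file above is saved as ecosystem.config.json, the apps can be started and reloaded with the standard PM2 commands:

```bash
# Start all apps defined in the ecosystem file
pm2 start ecosystem.config.json

# Zero-downtime reload after configuration or code changes (cluster mode)
pm2 reload ecosystem.config.json

# Persist the current process list so it survives reboots
pm2 save
```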
Microservices Monitoring
Monitor distributed Node.js microservices:
```toml
[filestream.nodejs_microservice_api]
paths = ["/var/log/nodejs/api-service*.log"]
format = "json"
tags = ["nodejs", "microservice", "api"]
fields = { service = "nodejs", microservice = "api-service", log_type = "application" }

[filestream.nodejs_microservice_auth]
paths = ["/var/log/nodejs/auth-service*.log"]
format = "json"
tags = ["nodejs", "microservice", "auth"]
fields = { service = "nodejs", microservice = "auth-service", log_type = "application" }

[filestream.nodejs_microservice_queue]
paths = ["/var/log/nodejs/queue-worker*.log"]
format = "json"
tags = ["nodejs", "microservice", "worker"]
fields = { service = "nodejs", microservice = "queue-worker", log_type = "worker" }
```
Performance Metrics
Track Node.js performance metrics:
```toml
[filestream.nodejs_performance]
paths = ["/var/log/nodejs/performance*.log"]
format = "json"
tags = ["nodejs", "performance"]
fields = { service = "nodejs", log_type = "performance" }

# Add calculated performance metrics
[filestream.nodejs_performance.processors.add_fields]
fields = { memory_usage_mb = "{{ div .memoryUsage 1048576 }}", cpu_usage_percent = "{{ mul .cpuUsage 100 }}", event_loop_delay_ms = "{{ .eventLoopDelay }}" }
```
Performance logging in Node.js:
```javascript
const logger = require('./logger');
const v8 = require('v8');

// Performance monitoring function
function logPerformanceMetrics() {
  const memUsage = process.memoryUsage();
  const cpuUsage = process.cpuUsage();
  const heapStats = v8.getHeapStatistics();

  logger.info('Performance Metrics', {
    timestamp: new Date().toISOString(),
    memoryUsage: {
      rss: memUsage.rss,
      heapUsed: memUsage.heapUsed,
      heapTotal: memUsage.heapTotal,
      external: memUsage.external
    },
    cpuUsage: {
      user: cpuUsage.user,
      system: cpuUsage.system
    },
    heapStatistics: {
      totalHeapSize: heapStats.total_heap_size,
      usedHeapSize: heapStats.used_heap_size,
      heapSizeLimit: heapStats.heap_size_limit
    },
    uptime: process.uptime()
  });
}

// Log performance metrics every 60 seconds
setInterval(logPerformanceMetrics, 60000);
```
Usage Examples
Monitor Node.js Applications
```bash
# Stream all Node.js logs
logflux-cli stream --filter 'service:nodejs'

# Monitor specific microservice
logflux-cli stream --filter 'service:nodejs AND microservice:api-service'

# Track error logs only
logflux-cli stream --filter 'service:nodejs AND log_type:error'
```
Performance Analysis
```bash
# Monitor high memory usage
logflux-cli stream --filter 'service:nodejs AND memoryUsage:>100000000'

# Track slow HTTP requests
logflux-cli stream --filter 'service:nodejs AND duration:>1000'

# Monitor PM2 process restarts
logflux-cli stream --filter 'service:nodejs AND message:restart'
```
Error Tracking
```bash
# Track uncaught exceptions
logflux-cli stream --filter 'service:nodejs AND level:error AND message:uncaught'

# Monitor promise rejections
logflux-cli stream --filter 'service:nodejs AND message:UnhandledPromiseRejectionWarning'

# Track application crashes
logflux-cli stream --filter 'service:nodejs AND tags:pm2 AND message:stopped'
```
Container Integration
Docker Configuration
```dockerfile
FROM node:18-alpine

WORKDIR /app

COPY package*.json ./
RUN npm ci --only=production

# Create log directory
RUN mkdir -p /var/log/nodejs && chown -R node:node /var/log/nodejs

COPY . .

USER node

# Volume for logs
VOLUME ["/var/log/nodejs"]

EXPOSE 3000

CMD ["npm", "start"]
```
Docker Compose with logging:
```yaml
version: '3.8'

services:
  nodejs-app:
    build: .
    ports:
      - "3000:3000"
    volumes:
      - nodejs_logs:/var/log/nodejs
    environment:
      - NODE_ENV=production
      - LOG_LEVEL=info
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  logflux-agent:
    image: logflux/agent:latest
    volumes:
      - nodejs_logs:/var/log/nodejs:ro
      - ./logflux-config:/etc/logflux-agent/plugins
    depends_on:
      - nodejs-app

volumes:
  nodejs_logs:
```
Kubernetes Configuration
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nodejs-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nodejs-app
  template:
    metadata:
      labels:
        app: nodejs-app
    spec:
      containers:
      - name: nodejs-app
        image: my-nodejs-app:latest
        ports:
        - containerPort: 3000
        env:
        - name: NODE_ENV
          value: "production"
        - name: LOG_LEVEL
          value: "info"
        volumeMounts:
        - name: logs
          mountPath: /var/log/nodejs
      volumes:
      - name: logs
        emptyDir: {}
```
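The Deployment above only writes logs into an emptyDir; something still has to read them. One option is to run the LogFlux Agent as a sidecar in the same pod, sharing the logs volume. A minimal sketch, reusing the logflux/agent image and the /etc/logflux-agent/plugins config path from the Docker Compose example; the logflux-plugins ConfigMap is a hypothetical name for a ConfigMap holding the filestream TOML files:

```yaml
    spec:
      containers:
      # ... the nodejs-app container from above ...
      - name: logflux-agent
        image: logflux/agent:latest
        volumeMounts:
        - name: logs
          mountPath: /var/log/nodejs
          readOnly: true
        - name: logflux-plugins
          mountPath: /etc/logflux-agent/plugins
      volumes:
      - name: logs
        emptyDir: {}
      - name: logflux-plugins
        configMap:
          name: logflux-plugins   # hypothetical ConfigMap containing the filestream TOML files
```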
Monitoring and Alerting
Key Metrics to Monitor
```toml
# High memory usage alert
[alerts.nodejs_memory_usage]
query = "service:nodejs AND memoryUsage:>500000000"
threshold = 5
window = "2m"
message = "Node.js high memory usage: {{ .microservice }}"

# Application error rate alert
[alerts.nodejs_error_rate]
query = "service:nodejs AND level:error"
threshold = 10
window = "1m"
message = "High error rate in Node.js application: {{ .microservice }}"

# PM2 process crash alert
[alerts.nodejs_process_crash]
query = "service:nodejs AND tags:pm2 AND message:stopped"
threshold = 1
window = "30s"
message = "Node.js process crashed: {{ .app_name }}"

# Event loop blocking alert
[alerts.nodejs_event_loop_blocked]
query = "service:nodejs AND eventLoopDelay:>100"
threshold = 3
window = "1m"
message = "Node.js event loop blocking detected"
```
Dashboard Metrics
Monitor these key Node.js metrics (a collection sketch for event loop lag and garbage collection follows the list):
- Request rate (requests per second by endpoint)
- Response times (average, 95th percentile)
- Error rates (by error type and microservice)
- Memory usage (heap used, RSS, external)
- CPU utilization (user time, system time)
- Event loop lag (milliseconds)
- Active handles (file descriptors, timers, sockets)
- Garbage collection (frequency, duration)
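Event loop lag and garbage collection are not covered by the earlier performance snippet; Node's built-in perf_hooks module can supply both. A minimal collection sketch (field names are illustrative, chosen to line up with the eventLoopDelay alert above):

```javascript
const { monitorEventLoopDelay, PerformanceObserver } = require('perf_hooks');
const logger = require('./logger');

// Sample event loop delay with a 20 ms resolution histogram
const loopDelay = monitorEventLoopDelay({ resolution: 20 });
loopDelay.enable();

// Log each garbage collection pause as it happens
const gcObserver = new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    logger.info('GC Pause', { durationMs: entry.duration });
  }
});
gcObserver.observe({ entryTypes: ['gc'] });

// Emit event loop lag figures once a minute (histogram values are in nanoseconds)
setInterval(() => {
  logger.info('Event Loop Delay', {
    eventLoopDelay: loopDelay.mean / 1e6,           // mean lag in ms
    eventLoopDelayP95: loopDelay.percentile(95) / 1e6
  });
  loopDelay.reset();
}, 60000);
```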
Troubleshooting
Common Issues
Node.js logs not appearing:
```bash
# Check Node.js process is running
ps aux | grep node

# Verify log file permissions
sudo ls -la /var/log/nodejs/

# Check application logging configuration
grep -r "winston\|pino\|console.log" /path/to/your/app/
```
JSON parsing errors:
```bash
# Validate JSON format
tail -n 10 /var/log/nodejs/app.log | jq .

# Check LogFlux Agent logs
sudo journalctl -u logflux-filestream -f

# Test logger configuration
node -e "const logger = require('./logger'); logger.info('test message');"
```
Performance issues:
```bash
# Check Node.js process resources
top -p $(pgrep node)

# Attach the inspector to profile event loop lag in Chrome DevTools
node --inspect app.js

# Check for memory leaks: write a heap snapshot on SIGUSR2 (Node 12+)
node --heapsnapshot-signal=SIGUSR2 app.js
kill -USR2 $(pgrep -f app.js)
```
PM2 integration issues:
```bash
# Check PM2 status
pm2 status

# Verify PM2 logs configuration
pm2 logs --format

# Restart PM2 processes
pm2 restart all
```
Best Practices
- Use structured logging (JSON format) for better parsing
- Implement log levels to control verbosity
- Use async logging libraries like Pino for high performance
- Monitor event loop lag and memory usage
Security
- Sanitize sensitive data before logging (passwords, tokens); a redaction sketch follows this list
- Implement rate limiting for log generation
- Use secure log rotation to prevent disk exhaustion
- Monitor authentication and authorization events
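A minimal Winston redaction sketch for the sanitization point above; the key list and placeholder value are illustrative (Pino users can use its built-in redact option instead):

```javascript
const winston = require('winston');

// Illustrative list of keys to mask before an entry is written
const SENSITIVE_KEYS = ['password', 'token', 'authorization', 'apiKey'];

// Custom format that replaces sensitive values with a placeholder
const redact = winston.format((info) => {
  for (const key of SENSITIVE_KEYS) {
    if (key in info) info[key] = '[REDACTED]';
  }
  return info;
});

const logger = winston.createLogger({
  format: winston.format.combine(
    redact(),
    winston.format.timestamp(),
    winston.format.json()
  ),
  transports: [
    new winston.transports.File({ filename: '/var/log/nodejs/combined.log' })
  ]
});

logger.info('Login attempt', { user: 'alice', password: 'hunter2' }); // password is masked
```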
Development
- Use correlation IDs for request tracing (see the sketch after this list)
- Implement health checks and readiness probes
- Log business events and user interactions
- Use error boundaries for better error handling
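A correlation-ID sketch for Express, combining crypto.randomUUID with a Winston child logger; the X-Correlation-Id header name is an illustrative choice:

```javascript
const express = require('express');
const { randomUUID } = require('crypto');
const logger = require('./logger');

const app = express();

// Attach a correlation ID to every request and stamp it onto all log entries
app.use((req, res, next) => {
  // Reuse an ID passed by an upstream service, otherwise generate one
  req.correlationId = req.get('X-Correlation-Id') || randomUUID();
  res.set('X-Correlation-Id', req.correlationId);
  // Winston child loggers add the ID to every entry written for this request
  req.log = logger.child({ correlationId: req.correlationId });
  next();
});

app.get('/orders', (req, res) => {
  req.log.info('Fetching orders'); // carries correlationId automatically
  res.json([]);
});
```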
Log Management
```javascript
// Log rotation configuration
const DailyRotateFile = require('winston-daily-rotate-file');
const logger = require('./logger'); // Winston logger module from earlier

const transport = new DailyRotateFile({
  filename: '/var/log/nodejs/application-%DATE%.log',
  datePattern: 'YYYY-MM-DD-HH',
  zippedArchive: true,
  maxSize: '20m',
  maxFiles: '14d'
});

logger.add(transport);
```
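The daily-rotate transport only covers files written by Winston. For files produced by other writers (for example the Morgan access log stream), a system-level logrotate rule is one alternative; a minimal sketch:

```
# /etc/logrotate.d/nodejs
/var/log/nodejs/*.log {
    daily
    rotate 14
    compress
    missingok
    notifempty
    copytruncate
}
```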
Error Handling
```javascript
const logger = require('./logger');

// Global error handlers
process.on('uncaughtException', (error) => {
  logger.error('Uncaught Exception', { error: error.message, stack: error.stack });
  process.exit(1);
});

process.on('unhandledRejection', (reason, promise) => {
  logger.error('Unhandled Rejection', { reason, promise });
});

// Express error handler (registered after all routes)
app.use((err, req, res, next) => {
  logger.error('Express Error', {
    error: err.message,
    stack: err.stack,
    url: req.url,
    method: req.method,
    ip: req.ip
  });
  res.status(500).json({ error: 'Internal Server Error' });
});
```
This comprehensive Node.js Applications integration provides real-time application monitoring, performance tracking, and error analysis using LogFlux Agent’s File Stream plugin. The configuration-based approach offers detailed insights into JavaScript application behavior, microservice communications, and runtime performance across different Node.js frameworks and deployment scenarios.
Disclaimer
The Node.js logo and trademarks are the property of the Node.js Foundation. LogFlux is not affiliated with, endorsed by, or sponsored by the Node.js Foundation. The Node.js logo is used solely for identification purposes to indicate compatibility and integration capabilities.