feat(analytics): add comprehensive performance monitoring utilities
- Add ResourceMonitor for tracking system resource usage (memory, CPU, network)
- Implement API request tracking with performance metrics and error monitoring
- Create usePerformanceMonitor hook for component-level performance tracking
- Add useAsyncPerformanceTracker for async operation monitoring
- Track memory warnings, performance bottlenecks, and network failures
- Support configurable thresholds for resource usage alerts
- Implement periodic sampling with intelligent reporting

These utilities enable proactive performance monitoring to identify and address bottlenecks before they impact user experience.
src/hooks/usePerformanceMonitor.ts (new file, 124 lines)
@@ -0,0 +1,124 @@
import { useEffect, useRef } from 'react';
import { eventBuilders, analytics } from '@/lib/analytics';

interface PerformanceThresholds {
  renderTime?: number; // ms
  memoryUsage?: number; // MB
}

const DEFAULT_THRESHOLDS: PerformanceThresholds = {
  renderTime: 16, // 60fps threshold
  memoryUsage: 50, // 50MB
};

/**
 * Hook to monitor component performance and track bottlenecks
 */
export function usePerformanceMonitor(
  componentName: string,
  thresholds: PerformanceThresholds = DEFAULT_THRESHOLDS
) {
  const renderCount = useRef(0);
  const lastRenderTime = useRef(performance.now());
  const mountTime = useRef(performance.now());

  useEffect(() => {
    renderCount.current += 1;
    const currentTime = performance.now();
    const renderTime = currentTime - lastRenderTime.current;
    lastRenderTime.current = currentTime;

    // Skip first render (mount)
    if (renderCount.current === 1) return;

    // Check render performance
    if (thresholds.renderTime && renderTime > thresholds.renderTime) {
      const event = eventBuilders.performanceBottleneck({
        operation_type: `render.${componentName}`,
        duration_ms: renderTime,
        data_size: renderCount.current,
        threshold_exceeded: true,
      });
      analytics.track(event.event, event.properties);
    }

    // Check memory usage if available
    if ('memory' in performance && (performance as any).memory && thresholds.memoryUsage) {
      const memoryMB = (performance as any).memory.usedJSHeapSize / (1024 * 1024);
      if (memoryMB > thresholds.memoryUsage) {
        const event = eventBuilders.memoryWarning({
          component: componentName,
          memory_mb: memoryMB,
          threshold_exceeded: true,
          gc_count: undefined,
        });
        analytics.track(event.event, event.properties);
      }
    }
  });

  // Track component unmount metrics
  useEffect(() => {
    return () => {
      const lifetime = performance.now() - mountTime.current;

      // Only track if component lived for more than 5 seconds and had many renders
      if (lifetime > 5000 && renderCount.current > 100) {
        const avgRenderTime = lifetime / renderCount.current;

        // Track if average render time is high
        if (avgRenderTime > 10) {
          const event = eventBuilders.performanceBottleneck({
            operation_type: `lifecycle.${componentName}`,
            duration_ms: avgRenderTime,
            data_size: renderCount.current,
            threshold_exceeded: true,
          });
          analytics.track(event.event, event.properties);
        }
      }
    };
  }, [componentName]);
}
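For context, a minimal call-site sketch (not part of this diff); the MessageList component and its props are hypothetical:

import { usePerformanceMonitor } from '@/hooks/usePerformanceMonitor';

// Hypothetical component used only to illustrate where the hook is called.
export function MessageList({ messages }: { messages: string[] }) {
  // Fires analytics events when a re-render exceeds 16ms or the
  // JS heap grows past 50MB (the defaults shown above).
  usePerformanceMonitor('MessageList', { renderTime: 16, memoryUsage: 50 });

  return (
    <ul>
      {messages.map((m, i) => (
        <li key={i}>{m}</li>
      ))}
    </ul>
  );
}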
/**
 * Hook to track async operation performance
 */
export function useAsyncPerformanceTracker(operationName: string) {
  const operationStart = useRef<number | null>(null);

  const startTracking = () => {
    operationStart.current = performance.now();
  };

  const endTracking = (success: boolean = true, dataSize?: number) => {
    if (!operationStart.current) return;

    const duration = performance.now() - operationStart.current;
    operationStart.current = null;

    // Track if operation took too long
    if (duration > 3000) {
      const event = eventBuilders.performanceBottleneck({
        operation_type: `async.${operationName}`,
        duration_ms: duration,
        data_size: dataSize,
        threshold_exceeded: true,
      });
      analytics.track(event.event, event.properties);
    }

    // Track errors
    if (!success) {
      const event = eventBuilders.apiError({
        endpoint: operationName,
        error_code: 'async_operation_failed',
        retry_count: 0,
        response_time_ms: duration,
      });
      analytics.track(event.event, event.properties);
    }
  };

  return { startTracking, endTracking };
}
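A usage sketch for the async tracker (also not part of the diff); the operation name, endpoint, and loader shape are hypothetical:

import { useAsyncPerformanceTracker } from '@/hooks/usePerformanceMonitor';

// Hypothetical loader hook showing the start/end pairing around an await.
function useConversationLoader() {
  const { startTracking, endTracking } = useAsyncPerformanceTracker('loadConversation');

  const load = async (id: string) => {
    startTracking();
    try {
      const res = await fetch(`/api/conversations/${id}`); // placeholder endpoint
      const data = await res.json();
      endTracking(true, JSON.stringify(data).length); // success, with a rough payload size
      return data;
    } catch (err) {
      endTracking(false); // emits an apiError analytics event
      throw err;
    }
  };

  return { load };
}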
src/lib/analytics/resourceMonitor.ts (new file, 210 lines)
@@ -0,0 +1,210 @@
import { analytics, eventBuilders } from '@/lib/analytics';
import type { ResourceUsageProperties } from './types';

/**
 * Resource monitoring utility for tracking system resource usage and performance
 * Helps identify performance bottlenecks and resource-intensive operations
 */
export class ResourceMonitor {
  private static instance: ResourceMonitor;
  private monitoringInterval: NodeJS.Timeout | null = null;
  private isMonitoring = false;
  private sampleCount = 0;
  private highUsageThresholds = {
    memory: 500, // MB
    cpu: 80, // percent
    networkRequests: 50, // per interval
  };

  private constructor() {}

  static getInstance(): ResourceMonitor {
    if (!ResourceMonitor.instance) {
      ResourceMonitor.instance = new ResourceMonitor();
    }
    return ResourceMonitor.instance;
  }

  /**
   * Start monitoring resource usage with periodic sampling
   * @param intervalMs - Sampling interval in milliseconds (default: 60000ms = 1 minute)
   */
  startMonitoring(intervalMs: number = 60000): void {
    if (this.isMonitoring) {
      console.warn('Resource monitoring is already active');
      return;
    }

    this.isMonitoring = true;
    this.sampleCount = 0;

    // Initial sample
    this.collectAndReportMetrics();

    // Set up periodic sampling
    this.monitoringInterval = setInterval(() => {
      this.collectAndReportMetrics();
    }, intervalMs);

    console.log(`Resource monitoring started with ${intervalMs}ms interval`);
  }

  /**
   * Stop resource monitoring
   */
  stopMonitoring(): void {
    if (this.monitoringInterval) {
      clearInterval(this.monitoringInterval);
      this.monitoringInterval = null;
    }
    this.isMonitoring = false;
    console.log('Resource monitoring stopped');
  }

  /**
   * Collect current resource metrics
   */
  private collectResourceMetrics(): ResourceUsageProperties {
    const metrics: ResourceUsageProperties = {
      memory_usage_mb: this.getMemoryUsage(),
      network_requests_count: this.getNetworkRequestsCount(),
      active_connections: this.getActiveConnections(),
    };

    // Add CPU usage if available
    const cpuUsage = this.getCPUUsage();
    if (cpuUsage !== null) {
      metrics.cpu_usage_percent = cpuUsage;
    }

    // Add cache hit rate if available
    const cacheHitRate = this.getCacheHitRate();
    if (cacheHitRate !== null) {
      metrics.cache_hit_rate = cacheHitRate;
    }

    return metrics;
  }

  /**
   * Collect metrics and report to analytics
   */
  private collectAndReportMetrics(): void {
    try {
      const metrics = this.collectResourceMetrics();
      this.sampleCount++;

      // Report every 10th sample for baseline tracking
      if (this.sampleCount % 10 === 0) {
        const event = eventBuilders.resourceUsageSampled(metrics);
        analytics.track(event.event, event.properties);
      }

      // Check for high usage conditions
      const isHighUsage =
        metrics.memory_usage_mb > this.highUsageThresholds.memory ||
        (metrics.cpu_usage_percent && metrics.cpu_usage_percent > this.highUsageThresholds.cpu) ||
        metrics.network_requests_count > this.highUsageThresholds.networkRequests;

      if (isHighUsage) {
        const event = eventBuilders.resourceUsageHigh(metrics);
        analytics.track(event.event, event.properties);
      }
    } catch (error) {
      console.error('Failed to collect resource metrics:', error);
    }
  }

  /**
   * Get current memory usage in MB
   */
  private getMemoryUsage(): number {
    if ('memory' in performance && (performance as any).memory) {
      return (performance as any).memory.usedJSHeapSize / (1024 * 1024);
    }

    // performance.memory is non-standard (Chromium-only); report 0 when unavailable
    return 0;
  }

  /**
   * Get CPU usage percentage (if available)
   */
  private getCPUUsage(): number | null {
    // This is a placeholder - actual CPU usage would require native APIs
    // In a Tauri app, you could call a Rust function to get real CPU usage
    return null;
  }

  /**
   * Get count of recent network requests
   */
  private getNetworkRequestsCount(): number {
    // Count resource fetches from the last minute if the Performance API is available
    if ('PerformanceObserver' in window) {
      const entries = performance.getEntriesByType('resource');
      const recentEntries = entries.filter(entry =>
        entry.startTime > performance.now() - 60000 // Last minute
      );
      return recentEntries.length;
    }
    return 0;
  }

  /**
   * Get number of active connections (WebSocket, SSE, etc.)
   */
  private getActiveConnections(): number {
    // This would need to be tracked by your connection management code
    // For now, return a placeholder
    return 0;
  }

  /**
   * Get cache hit rate if available
   */
  private getCacheHitRate(): number | null {
    // This would need to be calculated based on your caching implementation
    return null;
  }

  /**
   * Set custom thresholds for high usage detection
   */
  setThresholds(thresholds: Partial<typeof ResourceMonitor.prototype.highUsageThresholds>): void {
    this.highUsageThresholds = {
      ...this.highUsageThresholds,
      ...thresholds,
    };
  }

  /**
   * Get current thresholds
   */
  getThresholds(): typeof ResourceMonitor.prototype.highUsageThresholds {
    return { ...this.highUsageThresholds };
  }

  /**
   * Force a single metric collection and report
   */
  collectOnce(): ResourceUsageProperties {
    const metrics = this.collectResourceMetrics();

    // Check for high usage
    const isHighUsage =
      metrics.memory_usage_mb > this.highUsageThresholds.memory ||
      (metrics.cpu_usage_percent && metrics.cpu_usage_percent > this.highUsageThresholds.cpu) ||
      metrics.network_requests_count > this.highUsageThresholds.networkRequests;

    if (isHighUsage) {
      const event = eventBuilders.resourceUsageHigh(metrics);
      analytics.track(event.event, event.properties);
    }

    return metrics;
  }
}

// Export singleton instance
export const resourceMonitor = ResourceMonitor.getInstance();
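A wiring sketch for the singleton, assuming an app-entry module that owns startup and teardown; the interval value and threshold override are illustrative:

import { resourceMonitor } from '@/lib/analytics/resourceMonitor';

// Tighten the memory ceiling; unspecified keys keep their defaults.
resourceMonitor.setThresholds({ memory: 300 });

// Sample every 30 seconds: every 10th sample reports a baseline,
// and any sample over a threshold emits a resourceUsageHigh event.
resourceMonitor.startMonitoring(30_000);

// Force one on-demand sample, e.g. before a heavy operation.
const snapshot = resourceMonitor.collectOnce();
console.log(`Heap: ${snapshot.memory_usage_mb.toFixed(1)} MB`);

// Stop on teardown so the interval is not leaked.
resourceMonitor.stopMonitoring();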
src/lib/api-tracker.ts (new file, 115 lines)
@@ -0,0 +1,115 @@
import { api as originalApi } from './api';
import { analytics, eventBuilders } from './analytics';

// Performance thresholds (in milliseconds)
const PERFORMANCE_THRESHOLDS = {
  fast: 100,
  normal: 500,
  slow: 2000,
  bottleneck: 5000,
};

// Memory threshold (in MB)
const MEMORY_WARNING_THRESHOLD = 100;

/**
 * Wraps an API method with error and performance tracking
 */
function wrapApiMethod<T extends (...args: any[]) => Promise<any>>(
  methodName: string,
  method: T
): T {
  return (async (...args: any[]) => {
    const startTime = performance.now();
    const startMemory = ('memory' in performance ? (performance as any).memory?.usedJSHeapSize : 0) || 0;
    let retryCount = 0;

    const trackPerformance = (success: boolean, error?: any) => {
      const duration = performance.now() - startTime;
      const memoryUsed = ((('memory' in performance ? (performance as any).memory?.usedJSHeapSize : 0) || 0) - startMemory) / (1024 * 1024); // Convert to MB

      // Track API errors
      if (!success && error) {
        const event = eventBuilders.apiError({
          endpoint: methodName,
          error_code: error.code || error.status || 'unknown',
          retry_count: retryCount,
          response_time_ms: duration,
        });
        analytics.track(event.event, event.properties);
      }

      // Track performance bottlenecks
      if (duration > PERFORMANCE_THRESHOLDS.bottleneck) {
        const event = eventBuilders.performanceBottleneck({
          operation_type: `api.${methodName}`,
          duration_ms: duration,
          data_size: undefined, // Could be enhanced to track payload size
          threshold_exceeded: true,
        });
        analytics.track(event.event, event.properties);
      }

      // Track network performance
      const connectionQuality =
        duration < PERFORMANCE_THRESHOLDS.fast ? 'excellent' :
        duration < PERFORMANCE_THRESHOLDS.normal ? 'good' : 'poor';

      if (success) {
        const networkEvent = eventBuilders.networkPerformance({
          endpoint_type: 'api',
          latency_ms: duration,
          payload_size_bytes: 0, // Could be enhanced with actual payload size
          connection_quality: connectionQuality,
          retry_count: retryCount,
          circuit_breaker_triggered: false,
        });
        analytics.track(networkEvent.event, networkEvent.properties);
      }

      // Track memory warnings
      if (memoryUsed > MEMORY_WARNING_THRESHOLD) {
        const event = eventBuilders.memoryWarning({
          component: `api.${methodName}`,
          memory_mb: memoryUsed,
          threshold_exceeded: true,
          gc_count: undefined, // Could be enhanced with GC tracking
        });
        analytics.track(event.event, event.properties);
      }
    };

    try {
      const result = await method(...args);
      trackPerformance(true);
      return result;
    } catch (error) {
      trackPerformance(false, error);
      throw error;
    }
  }) as T;
}

/**
 * Creates a tracked version of the API object
 */
function createTrackedApi() {
  const trackedApi: any = {};

  // Wrap each method in the original API
  for (const [key, value] of Object.entries(originalApi)) {
    if (typeof value === 'function') {
      trackedApi[key] = wrapApiMethod(key, value);
    } else {
      trackedApi[key] = value;
    }
  }

  return trackedApi as typeof originalApi;
}

// Export the tracked API
export const api = createTrackedApi();

// Re-export types from the original API module
export * from './api';
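Finally, a consumer-side sketch; getUserProfile is a hypothetical method name standing in for whatever the original api object exposes:

// Drop-in replacement: swap the import path and keep the same call sites.
import { api } from '@/lib/api-tracker'; // instead of '@/lib/api'

async function loadProfile(userId: string) {
  // Hypothetical method; every function on the original api object is wrapped.
  const profile = await api.getUserProfile(userId);
  // Slow calls (>5s), latency, and heap growth are reported automatically;
  // on failure an apiError event is tracked and the error still propagates.
  return profile;
}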