After setting up the test framework and writing integration tests on Days 21-22, we now have a complete test suite. Today we wire those tests into the CI/CD pipeline for genuinely automated testing and continuous integration, so that every code change is verified automatically.
Let's first look at the testing strategy at each level of CI/CD maturity:
/**
* CI/CD Testing Maturity Model
*
* Level 1: Manual Testing
* ❌ Tests are run by hand before every deploy
* ❌ Steps are easy to miss
* ❌ No guarantee of consistency
*
* Level 2: Basic CI
* ✅ Tests run automatically on every PR
* ⚠️ Unit tests only
* ⚠️ No test reports
*
* Level 3: Advanced CI
* ✅ Multi-level tests (Unit + Integration + E2E)
* ✅ Test reports and coverage tracking
* ✅ Automatic rollback on failure
* ⚠️ Test environment still differs from production
*
* Level 4: Production-Grade CI/CD ⭐️
* ✅ Full test pipeline
* ✅ Environment consistency checks
* ✅ Performance benchmarks
* ✅ Security scanning
* ✅ Automated deployment and verification
* ✅ Real-time monitoring and alerting
*/
# .github/workflows/test-matrix.yml
name: Test Matrix
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
jobs:
test:
name: Test on Node ${{ matrix.node-version }} / ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
# Keep running the other combinations even if one job fails
fail-fast: false
matrix:
# Test against multiple Node.js versions
node-version: [18.x, 20.x, 21.x]
# Test on multiple operating systems
os: [ubuntu-latest, macos-latest]
# Exclude specific combinations
exclude:
# On macOS, skip the oldest Node version (saves CI time)
- os: macos-latest
node-version: 18.x
# Add extra combinations
include:
# Test the latest LTS on Windows
- os: windows-latest
node-version: 20.x
services:
# Redis service container (note: GitHub-hosted service containers only run on Linux runners)
redis:
image: redis:7-alpine
ports:
- 6379:6379
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
# PostgreSQL service container
postgres:
image: postgres:15-alpine
env:
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
POSTGRES_DB: kyo_test
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
# Fetch the full history so changes can be analysed
fetch-depth: 0
- name: Setup pnpm
uses: pnpm/action-setup@v2
with:
version: 8
- name: Setup Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: 'pnpm'
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT
- name: Setup pnpm cache
uses: actions/cache@v3
with:
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build packages
run: pnpm -w --filter "@kyong/*" build
- name: Run linting
run: pnpm run lint
# Only run lint on Ubuntu + Node 20 (avoid duplicate work)
if: matrix.os == 'ubuntu-latest' && matrix.node-version == '20.x'
- name: Run type checking
run: pnpm run type-check
- name: Run unit tests
run: pnpm run test:unit
env:
NODE_ENV: test
- name: Run integration tests
run: pnpm run test:integration
env:
NODE_ENV: test
DATABASE_URL: postgresql://test_user:test_password@localhost:5432/kyo_test
REDIS_HOST: localhost
REDIS_PORT: 6379
- name: Run E2E tests
run: pnpm run test:e2e
env:
NODE_ENV: test
DATABASE_URL: postgresql://test_user:test_password@localhost:5432/kyo_test
REDIS_HOST: localhost
REDIS_PORT: 6379
- name: Generate coverage report
run: pnpm run test:coverage
if: matrix.os == 'ubuntu-latest' && matrix.node-version == '20.x'
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
if: matrix.os == 'ubuntu-latest' && matrix.node-version == '20.x'
with:
files: ./coverage/lcov.info
flags: unittests
name: codecov-${{ matrix.os }}-node-${{ matrix.node-version }}
fail_ci_if_error: false
- name: Upload test results
uses: actions/upload-artifact@v3
if: always()
with:
name: test-results-${{ matrix.os }}-node-${{ matrix.node-version }}
path: |
coverage/
test-results/
retention-days: 7
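The matrix job above assumes the repository exposes separate `test:unit`, `test:integration`, and `test:e2e` scripts. How those are wired up is project-specific; one possible sketch, assuming vitest and an illustrative directory layout, is a small config per suite that differs only in its `include` pattern:

```ts
// vitest.integration.config.ts — illustrative; the unit and e2e configs differ only in `include`
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    environment: 'node',
    // Only pick up integration specs; unit tests live next to the source files
    include: ['tests/integration/**/*.test.ts'],
    // Integration tests talk to the Redis/Postgres service containers, so allow more time
    testTimeout: 30_000,
  },
});
```

The corresponding package.json script would then be something like `vitest run --config vitest.integration.config.ts`.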
# .github/workflows/coverage.yml
name: Coverage Check
on:
pull_request:
branches: [main, develop]
jobs:
coverage:
name: Check Test Coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: pnpm/action-setup@v2
with:
version: 8
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'pnpm'
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build packages
run: pnpm run build
- name: Generate coverage
run: pnpm run test:coverage
- name: Check coverage thresholds
run: |
# Read the coverage report
COVERAGE=$(cat coverage/coverage-summary.json)
# Extract each coverage metric
LINES=$(echo $COVERAGE | jq '.total.lines.pct')
STATEMENTS=$(echo $COVERAGE | jq '.total.statements.pct')
FUNCTIONS=$(echo $COVERAGE | jq '.total.functions.pct')
BRANCHES=$(echo $COVERAGE | jq '.total.branches.pct')
echo "Coverage Summary:"
echo "Lines: $LINES%"
echo "Statements: $STATEMENTS%"
echo "Functions: $FUNCTIONS%"
echo "Branches: $BRANCHES%"
# Check the metrics against the thresholds
THRESHOLD=80
if (( $(echo "$LINES < $THRESHOLD" | bc -l) )); then
echo "❌ Line coverage ($LINES%) is below threshold ($THRESHOLD%)"
exit 1
fi
if (( $(echo "$STATEMENTS < $THRESHOLD" | bc -l) )); then
echo "❌ Statement coverage ($STATEMENTS%) is below threshold ($THRESHOLD%)"
exit 1
fi
if (( $(echo "$FUNCTIONS < $THRESHOLD" | bc -l) )); then
echo "❌ Function coverage ($FUNCTIONS%) is below threshold ($THRESHOLD%)"
exit 1
fi
if (( $(echo "$BRANCHES < 75" | bc -l) )); then
echo "❌ Branch coverage ($BRANCHES%) is below threshold (75%)"
exit 1
fi
echo "✅ All coverage thresholds met!"
- name: Coverage diff
uses: artiomtr/jest-coverage-report-action@v2
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
test-script: pnpm run test:coverage
annotations: all
package-manager: pnpm
- name: Comment PR with coverage
uses: romeovs/lcov-reporter-action@v0.3.1
with:
lcov-file: ./coverage/lcov.info
github-token: ${{ secrets.GITHUB_TOKEN }}
delete-old-comments: true
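The shell script above enforces the gate in CI only. The same 80/80/80/75 thresholds can also be declared in the test runner, so a local `pnpm run test:coverage` fails for the same reason the PR check does. A sketch, assuming vitest with the v8 coverage provider (on older vitest versions the threshold keys sit directly under `coverage` rather than `coverage.thresholds`):

```ts
// vitest.config.ts — coverage excerpt (assumed setup, adjust to the real config)
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      provider: 'v8',
      // json-summary writes coverage/coverage-summary.json, which the CI script reads
      reporter: ['text', 'lcov', 'json-summary'],
      thresholds: {
        lines: 80,
        statements: 80,
        functions: 80,
        branches: 75,
      },
    },
  },
});
```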
# .github/workflows/benchmark.yml
name: Performance Benchmark
on:
pull_request:
branches: [main]
push:
branches: [main]
jobs:
benchmark:
name: Run Performance Benchmarks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v2
with:
version: 8
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'pnpm'
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build packages
run: pnpm run build
- name: Run benchmarks
run: pnpm run benchmark
- name: Store benchmark result
uses: benchmark-action/github-action-benchmark@v1
with:
name: Node.js Benchmark
tool: 'benchmarkjs'
output-file-path: benchmark-results.json
github-token: ${{ secrets.GITHUB_TOKEN }}
# Automatically push results to the gh-pages branch
auto-push: true
# Alert when performance regresses by more than 10%
alert-threshold: '110%'
comment-on-alert: true
fail-on-alert: false
alert-comment-cc-users: '@morrislin'
- name: Comment PR with benchmark results
uses: actions/github-script@v6
if: github.event_name == 'pull_request'
with:
script: |
const fs = require('fs');
const results = JSON.parse(fs.readFileSync('benchmark-results.json', 'utf8'));
let comment = '## ⚡ Performance Benchmark Results\n\n';
comment += '| Benchmark | ops/sec | Comparison |\n';
comment += '|-----------|---------|------------|\n';
results.forEach(result => {
const comparison = result.comparison || 'baseline';
const emoji = comparison === 'faster' ? '🚀' :
comparison === 'slower' ? '🐌' : '⚖️';
comment += `| ${result.name} | ${result.opsPerSecond} | ${emoji} ${comparison} |\n`;
});
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: comment
});
// apps/kyo-otp-service/benchmark/otp-generation.bench.ts
import Benchmark from 'benchmark';
import crypto from 'crypto';
/**
* OTP generation benchmark
* Compares the throughput of different generation strategies
*/
const suite = new Benchmark.Suite();
// Approach 1: crypto.randomInt (built into Node.js)
function generateOTPCryptoRandomInt(): string {
let otp = '';
for (let i = 0; i < 6; i++) {
otp += crypto.randomInt(0, 10);
}
return otp;
}
// Approach 2: crypto.randomBytes (lower level)
function generateOTPRandomBytes(): string {
const buffer = crypto.randomBytes(3);
const number = buffer.readUIntBE(0, 3);
return (number % 1000000).toString().padStart(6, '0');
}
// Approach 3: Math.random (fast but not cryptographically secure)
function generateOTPMathRandom(): string {
return Math.floor(100000 + Math.random() * 900000).toString();
}
// Approach 4: pre-generated pool (fastest of the secure options)
class OTPPool {
private pool: string[] = [];
private readonly poolSize = 1000;
constructor() {
this.refillPool();
}
private refillPool(): void {
for (let i = this.pool.length; i < this.poolSize; i++) {
this.pool.push(generateOTPRandomBytes());
}
}
getOTP(): string {
if (this.pool.length < 100) {
this.refillPool();
}
return this.pool.pop()!;
}
}
const pool = new OTPPool();
// Register the benchmark cases
suite
.add('crypto.randomInt', () => {
generateOTPCryptoRandomInt();
})
.add('crypto.randomBytes', () => {
generateOTPRandomBytes();
})
.add('Math.random (unsafe)', () => {
generateOTPMathRandom();
})
.add('Pre-generated pool', () => {
pool.getOTP();
})
.on('cycle', (event: Benchmark.Event) => {
console.log(String(event.target));
})
.on('complete', function(this: Benchmark.Suite) {
console.log('\n🏆 Fastest is ' + this.filter('fastest').map('name'));
// Print a detailed comparison
console.log('\n📊 Performance Comparison:');
const results = Array.from(this);
const fastest = results.reduce((a, b) => a.hz > b.hz ? a : b);
results.forEach(bench => {
const slower = (fastest.hz / bench.hz).toFixed(2);
console.log(` ${bench.name}: ${bench.hz.toFixed(0)} ops/sec (${slower}x slower than fastest)`);
});
})
.run({ async: true });
/**
* Expected results (rough orders of magnitude):
*
* 1. Pre-generated pool: ~10,000,000 ops/sec
* - Fastest of the secure options, since it only pops a value from an array
* - Well suited to high-concurrency scenarios
* - Requires managing the pool's memory and refills
*
* 2. crypto.randomBytes: ~500,000 ops/sec
* - A good balance of security and speed
* - Recommended for production
*
* 3. crypto.randomInt: ~200,000 ops/sec
* - Slower, but the code is simpler
* - Fine for low-to-medium traffic
*
* 4. Math.random: ~50,000,000 ops/sec
* - Numerically the fastest, but not cryptographically secure
* - Never use it for OTPs!
*/
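The workflow's PR-comment step reads `benchmark-results.json` as an array of `{ name, opsPerSecond }` objects (the same shape as the optional `benchmarks` field in the TestReport interface further down). One way to produce that file from the suite, called from the existing `complete` handler; the exact schema here is an assumption and has to match whatever the reporting steps expect:

```ts
// benchmark/write-results.ts — hypothetical helper
import fs from 'fs';
import type Benchmark from 'benchmark';

export function writeBenchmarkResults(
  suite: Benchmark.Suite,
  outFile = 'benchmark-results.json'
): void {
  // A Benchmark.Suite is array-like; each element is a completed Benchmark instance.
  const results = Array.from(suite as unknown as ArrayLike<Benchmark>).map((bench) => ({
    name: bench.name ?? 'unnamed',
    opsPerSecond: Math.round(bench.hz),           // operations per second
    margin: Number(bench.stats.rme.toFixed(2)),   // relative margin of error, in percent
  }));
  fs.writeFileSync(outFile, JSON.stringify(results, null, 2));
}
```

Calling `writeBenchmarkResults(this)` at the end of the `complete` callback is enough. Note that `github-action-benchmark`'s `benchmarkjs` tool parses the textual `x ... ops/sec` console output of benchmark.js rather than JSON, so the same file may not serve both that action and the custom PR comment; keep the formats aligned with their consumers.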
// benchmark/redis-operations.bench.ts
import Benchmark from 'benchmark';
import Redis from 'ioredis';
const suite = new Benchmark.Suite();
// Compare different Redis access patterns
const redis = new Redis({
host: 'localhost',
port: 6379,
lazyConnect: true,
});
// Prepare test data
const testData = Array.from({ length: 100 }, (_, i) => ({
key: `test:key:${i}`,
value: `value_${i}`,
}));
suite
.add('Sequential SET', {
defer: true,
fn: async (deferred: any) => {
for (const item of testData) {
await redis.set(item.key, item.value);
}
deferred.resolve();
},
})
.add('Pipeline SET', {
defer: true,
fn: async (deferred: any) => {
const pipeline = redis.pipeline();
for (const item of testData) {
pipeline.set(item.key, item.value);
}
await pipeline.exec();
deferred.resolve();
},
})
.add('Sequential GET', {
defer: true,
fn: async (deferred: any) => {
for (const item of testData) {
await redis.get(item.key);
}
deferred.resolve();
},
})
.add('Pipeline GET', {
defer: true,
fn: async (deferred: any) => {
const pipeline = redis.pipeline();
for (const item of testData) {
pipeline.get(item.key);
}
await pipeline.exec();
deferred.resolve();
},
})
.add('MGET (multi-get)', {
defer: true,
fn: async (deferred: any) => {
await redis.mget(testData.map(item => item.key));
deferred.resolve();
},
})
.on('cycle', (event: Benchmark.Event) => {
console.log(String(event.target));
})
.on('complete', function(this: Benchmark.Suite) {
console.log('\n🏆 Fastest is ' + this.filter('fastest').map('name'));
redis.disconnect();
})
.run({ async: true });
/**
* Expected results (local Redis, 100 keys per iteration):
*
* MGET (multi-get): ~1,000 ops/sec
* Pipeline GET: ~800 ops/sec
* Pipeline SET: ~600 ops/sec
* Sequential GET: ~100 ops/sec
* Sequential SET: ~80 ops/sec
*
* Takeaways:
* - Pipelining is roughly 8-10x faster than sequential commands
* - MGET is another 20%+ faster than a pipelined GET
* - Batching is the key to Redis throughput
*/
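Carrying that conclusion back into application code: whenever the OTP service writes several related keys for one request (the code itself, an attempt counter, a send counter), grouping them into a single pipeline keeps it to one round trip. A sketch only; the key names and TTLs are illustrative, not the service's actual schema:

```ts
import Redis from 'ioredis';

// Store an OTP together with its bookkeeping keys in a single round trip.
async function storeOtp(redis: Redis, phone: string, code: string, ttlSeconds = 300): Promise<void> {
  const pipeline = redis.pipeline();
  pipeline.set(`otp:code:${phone}`, code, 'EX', ttlSeconds);
  pipeline.set(`otp:attempts:${phone}`, '0', 'EX', ttlSeconds);
  pipeline.incr(`otp:sent:${phone}`);        // simple send counter for rate limiting
  pipeline.expire(`otp:sent:${phone}`, 3600);
  const results = await pipeline.exec();
  // ioredis returns [error, result] tuples; surface the first error, if any.
  const firstError = results?.find(([err]) => err)?.[0];
  if (firstError) throw firstError;
}
```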
// scripts/publish-test-report.ts
import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';
import fs from 'fs';
import path from 'path';
interface TestReport {
timestamp: string;
branch: string;
commit: string;
coverage: {
lines: number;
statements: number;
functions: number;
branches: number;
};
tests: {
total: number;
passed: number;
failed: number;
skipped: number;
};
duration: number;
benchmarks?: Array<{
name: string;
opsPerSecond: number;
margin: number;
}>;
}
async function publishTestReport() {
// Read the test reports
const coverageSummary = JSON.parse(
fs.readFileSync('coverage/coverage-summary.json', 'utf-8')
);
const testResults = JSON.parse(
fs.readFileSync('test-results/results.json', 'utf-8')
);
// Build the report
const report: TestReport = {
timestamp: new Date().toISOString(),
branch: process.env.GITHUB_REF_NAME || 'unknown',
commit: process.env.GITHUB_SHA || 'unknown',
coverage: {
lines: coverageSummary.total.lines.pct,
statements: coverageSummary.total.statements.pct,
functions: coverageSummary.total.functions.pct,
branches: coverageSummary.total.branches.pct,
},
tests: {
total: testResults.numTotalTests,
passed: testResults.numPassedTests,
failed: testResults.numFailedTests,
skipped: testResults.numPendingTests,
},
duration: testResults.testDuration,
};
// Upload to S3
const s3 = new S3Client({ region: process.env.AWS_REGION });
const key = `test-reports/${report.branch}/${report.commit}.json`;
await s3.send(
new PutObjectCommand({
Bucket: 'kyo-test-reports',
Key: key,
Body: JSON.stringify(report, null, 2),
ContentType: 'application/json',
Metadata: {
branch: report.branch,
commit: report.commit,
},
})
);
console.log(`✅ Test report published: s3://kyo-test-reports/${key}`);
// Generate the HTML report
const html = generateHTMLReport(report);
await s3.send(
new PutObjectCommand({
Bucket: 'kyo-test-reports',
Key: `test-reports/${report.branch}/${report.commit}.html`,
Body: html,
ContentType: 'text/html',
})
);
// Update the latest pointer
await s3.send(
new PutObjectCommand({
Bucket: 'kyo-test-reports',
Key: `test-reports/${report.branch}/latest.json`,
Body: JSON.stringify(report, null, 2),
ContentType: 'application/json',
})
);
}
function generateHTMLReport(report: TestReport): string {
return `
<!DOCTYPE html>
<html>
<head>
<title>Test Report - ${report.commit.substring(0, 7)}</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
max-width: 1200px;
margin: 0 auto;
padding: 20px;
background: #f5f5f5;
}
.header {
background: white;
padding: 30px;
border-radius: 8px;
margin-bottom: 20px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.metric {
display: inline-block;
margin: 10px 20px 10px 0;
}
.metric-label {
font-size: 12px;
color: #666;
text-transform: uppercase;
}
.metric-value {
font-size: 32px;
font-weight: bold;
color: #2ecc71;
}
.metric-value.warning { color: #f39c12; }
.metric-value.danger { color: #e74c3c; }
.coverage-bar {
height: 30px;
background: #ecf0f1;
border-radius: 4px;
overflow: hidden;
margin: 10px 0;
}
.coverage-fill {
height: 100%;
background: linear-gradient(90deg, #2ecc71, #27ae60);
display: flex;
align-items: center;
justify-content: center;
color: white;
font-weight: bold;
}
</style>
</head>
<body>
<div class="header">
<h1>🧪 Test Report</h1>
<p><strong>Branch:</strong> ${report.branch}</p>
<p><strong>Commit:</strong> ${report.commit}</p>
<p><strong>Time:</strong> ${new Date(report.timestamp).toLocaleString()}</p>
</div>
<div class="header">
<h2>📊 Test Results</h2>
<div class="metric">
<div class="metric-label">Total Tests</div>
<div class="metric-value">${report.tests.total}</div>
</div>
<div class="metric">
<div class="metric-label">Passed</div>
<div class="metric-value">${report.tests.passed}</div>
</div>
<div class="metric">
<div class="metric-label">Failed</div>
<div class="metric-value ${report.tests.failed > 0 ? 'danger' : ''}">${report.tests.failed}</div>
</div>
<div class="metric">
<div class="metric-label">Duration</div>
<div class="metric-value">${(report.duration / 1000).toFixed(2)}s</div>
</div>
</div>
<div class="header">
<h2>📈 Coverage</h2>
${Object.entries(report.coverage).map(([key, value]) => `
<div>
<div class="metric-label">${key}</div>
<div class="coverage-bar">
<div class="coverage-fill" style="width: ${value}%">${value.toFixed(1)}%</div>
</div>
</div>
`).join('')}
</div>
</body>
</html>
`;
}
publishTestReport().catch(console.error);
# .github/workflows/deploy-with-rollback.yml
name: Deploy with Automatic Rollback
on:
push:
branches: [main]
jobs:
test-and-deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
# ... setup steps omitted (same as in the workflows above) ...
- name: Run all tests
id: tests
run: pnpm run test:all
continue-on-error: true
- name: Check test results
if: steps.tests.outcome == 'failure'
run: |
echo "❌ Tests failed, deployment aborted"
exit 1
- name: Build Docker image
run: |
docker build -t $ECR_REGISTRY/kyo-api:${{ github.sha }} .
docker tag $ECR_REGISTRY/kyo-api:${{ github.sha }} $ECR_REGISTRY/kyo-api:latest
- name: Push to ECR
run: |
aws ecr get-login-password --region ap-northeast-1 | \
docker login --username AWS --password-stdin $ECR_REGISTRY
docker push $ECR_REGISTRY/kyo-api:${{ github.sha }}
docker push $ECR_REGISTRY/kyo-api:latest
- name: Deploy to ECS
id: deploy
run: |
aws ecs update-service \
--cluster kyo-production \
--service kyo-api \
--force-new-deployment \
--desired-count 3
- name: Wait for deployment
run: |
aws ecs wait services-stable \
--cluster kyo-production \
--services kyo-api
- name: Run smoke tests
id: smoke_tests
run: pnpm run test:smoke
continue-on-error: true
- name: Rollback on smoke test failure
if: steps.smoke_tests.outcome == 'failure'
run: |
echo "🚨 Smoke tests failed, rolling back..."
# Get the previous stable task definition
PREVIOUS_TASK_DEF=$(aws ecs describe-services \
--cluster kyo-production \
--services kyo-api \
--query 'services[0].deployments[1].taskDefinition' \
--output text)
# Roll back to the previous version
aws ecs update-service \
--cluster kyo-production \
--service kyo-api \
--task-definition $PREVIOUS_TASK_DEF \
--force-new-deployment
exit 1
- name: Notify deployment status
if: always()
uses: 8398a7/action-slack@v3
with:
status: ${{ job.status }}
text: |
Deployment ${{ job.status }}
Branch: ${{ github.ref }}
Commit: ${{ github.sha }}
Smoke Tests: ${{ steps.smoke_tests.outcome }}
webhook_url: ${{ secrets.SLACK_WEBHOOK }}
// tests/smoke/api-smoke.test.ts
import { test, describe } from 'node:test';
import assert from 'node:assert/strict';
/**
* Smoke tests: quick post-deployment health checks
* Goal: verify that core functionality works, within roughly 30 seconds
*/
const API_BASE_URL = process.env.API_URL || 'https://api.kyong.com';
const TIMEOUT = 5000; // 5-second timeout per request
describe('API Smoke Tests', () => {
test('Health endpoint responds', async () => {
const response = await fetchWithTimeout(`${API_BASE_URL}/health`, TIMEOUT);
assert.equal(response.status, 200);
const data = await response.json();
assert.equal(data.status, 'ok');
});
test('Can send OTP', async () => {
const response = await fetchWithTimeout(`${API_BASE_URL}/api/otp/send`, TIMEOUT, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-Tenant-ID': 'smoke-test',
},
body: JSON.stringify({
phone: '0912345678',
template: 'test',
}),
});
assert.ok(response.status === 200 || response.status === 429); // 200 OK, or 429 if rate limited
});
test('Redis is accessible', async () => {
const response = await fetchWithTimeout(`${API_BASE_URL}/health/redis`, TIMEOUT);
assert.equal(response.status, 200);
const data = await response.json();
assert.equal(data.redis.healthy, true);
});
test('Database is accessible', async () => {
const response = await fetchWithTimeout(`${API_BASE_URL}/health/db`, TIMEOUT);
assert.equal(response.status, 200);
const data = await response.json();
assert.equal(data.database.healthy, true);
});
test('API latency is acceptable', async () => {
const startTime = Date.now();
const response = await fetchWithTimeout(`${API_BASE_URL}/health`, TIMEOUT);
const latency = Date.now() - startTime;
assert.ok(response.status === 200);
assert.ok(latency < 1000, `API latency ${latency}ms exceeds 1000ms threshold`);
});
});
async function fetchWithTimeout(
url: string,
timeout: number,
options?: RequestInit
): Promise<Response> {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), timeout);
try {
const response = await fetch(url, {
...options,
signal: controller.signal,
});
return response;
} finally {
clearTimeout(timeoutId);
}
}
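Right after `aws ecs wait services-stable` returns, individual tasks can still be warming up, so the very first smoke request occasionally fails for reasons that have nothing to do with the release. A small retry helper (an optional addition, not part of the suite above) keeps the checks strict but less flaky:

```ts
// tests/smoke/retry.ts — optional helper, illustrative
export async function withRetry<T>(
  fn: () => Promise<T>,
  attempts = 3,
  delayMs = 2000
): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      return await fn();
    } catch (error) {
      lastError = error;
      if (attempt < attempts) {
        await new Promise((resolve) => setTimeout(resolve, delayMs));
      }
    }
  }
  throw lastError;
}
```

Individual checks can then opt in by wrapping their `fetchWithTimeout` call in `withRetry(...)`.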
// vitest.config.ts
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
// Use worker threads to speed up the run
threads: true,
// Maximum concurrency (tune to the number of CPU cores)
maxConcurrency: 10,
// Skip per-file isolation for extra speed (test files share the worker context)
isolate: false,
// Tune how test files are sequenced
sequence: {
// Keep a deterministic file order (no shuffling)
shuffle: false,
hooks: 'parallel',
},
// Cache between runs to speed up repeated executions
cache: {
dir: 'node_modules/.vitest',
},
},
});
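With `isolate: false` and parallel hooks, test files running in different workers can collide on shared resources (the same Redis keys, the same database rows). One common mitigation is to namespace test data per worker; a sketch, assuming the `VITEST_POOL_ID` environment variable that vitest sets for each worker (falling back to the process id):

```ts
// tests/helpers/test-prefix.ts — illustrative helper
// Each worker gets its own prefix so parallel tests never share keys or rows.
const workerId = process.env.VITEST_POOL_ID ?? String(process.pid);

export function testKey(key: string): string {
  return `test:w${workerId}:${key}`;
}

// Usage: redis.set(testKey('otp:0912345678'), code)
```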
// tests/helpers/db-pool.ts
import { Pool } from 'pg';
/**
* Test database connection pool
* Avoids opening a new connection for every test
*/
class TestDatabasePool {
private static instance: Pool;
private static connectionCount = 0;
static async getPool(): Promise<Pool> {
if (!this.instance) {
this.instance = new Pool({
connectionString: process.env.TEST_DATABASE_URL,
max: 20, // maximum number of connections
min: 5, // minimum number of idle connections
idleTimeoutMillis: 30000,
});
// Warm up the connection pool
await this.warmup();
}
this.connectionCount++;
return this.instance;
}
private static async warmup(): Promise<void> {
const clients = [];
for (let i = 0; i < 5; i++) {
clients.push(this.instance.connect());
}
const connections = await Promise.all(clients);
connections.forEach(client => client.release());
}
static async close(): Promise<void> {
if (this.instance) {
await this.instance.end();
this.instance = null as any;
}
}
}
export { TestDatabasePool };
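How a test file might consume the pool — a sketch using vitest hooks with an illustrative query. Since the pool is shared across files within a worker, `close()` is usually better placed in a single global teardown than in every file's `afterAll`; it is shown here only to make the lifecycle visible:

```ts
// tests/integration/db-pool.example.test.ts — illustrative usage
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
import type { Pool } from 'pg';
import { TestDatabasePool } from '../helpers/db-pool';

describe('test database pool', () => {
  let pool: Pool;

  beforeAll(async () => {
    // Reuses the shared, pre-warmed pool instead of opening new connections
    pool = await TestDatabasePool.getPool();
  });

  afterAll(async () => {
    // In a real suite, prefer one global teardown over per-file close()
    await TestDatabasePool.close();
  });

  it('can reach the test database', async () => {
    const { rows } = await pool.query('SELECT 1 AS ok');
    expect(rows[0].ok).toBe(1);
  });
});
```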
Today wraps up the final part of the testing trilogy. The key trade-offs we covered:
Matrix testing vs. a single environment: running the suite across several Node.js versions and operating systems catches environment-specific failures that a single Ubuntu runner would miss, while the exclude/include rules keep the extra CI time under control.
Coverage threshold strategy: the PR gate enforces 80% for lines, statements, and functions and 75% for branches, so coverage regressions are blocked before merge instead of discovered afterwards.
Benchmark best practices: benchmark competing implementations (OTP generation, Redis access patterns), track results over time on gh-pages, and alert when a change regresses performance by more than 10%.