Initial commit: Cloud Instances API
Multi-cloud VM instance database with Cloudflare Workers - Linode, Vultr, AWS connector integration - D1 database with regions, instances, pricing - Query API with filtering, caching, pagination - Cron-based auto-sync (daily + 6-hourly) - Health monitoring endpoint Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
149
src/services/cache.test.ts
Normal file
149
src/services/cache.test.ts
Normal file
@@ -0,0 +1,149 @@
|
||||
/**
|
||||
* Manual Test Instructions for CacheService
|
||||
*
|
||||
* Since Cloudflare Workers Cache API is only available in the Workers runtime,
|
||||
* these tests must be run in a Cloudflare Workers environment or using Miniflare.
|
||||
*
|
||||
* Test 1: Basic Cache Operations
|
||||
* -------------------------------
|
||||
* 1. Deploy to Cloudflare Workers development environment
|
||||
* 2. Initialize cache: const cache = new CacheService(300);
|
||||
* 3. Set data: await cache.set('test-key', { foo: 'bar' }, 60);
|
||||
* 4. Get data: const result = await cache.get('test-key');
|
||||
* 5. Expected: result.data.foo === 'bar', cache_age_seconds ≈ 0
|
||||
*
|
||||
* Test 2: Cache Key Generation
|
||||
* -----------------------------
|
||||
* 1. Generate key: const key = cache.generateKey({ provider: 'linode', region: 'us-east' });
|
||||
 * 2. Expected: key === 'https://cache.internal/instances?provider=linode&region=us-east'
|
||||
* 3. Verify sorting: cache.generateKey({ z: 1, a: 2 }) should have 'a' before 'z'
|
||||
*
|
||||
* Test 3: Cache Miss
|
||||
* ------------------
|
||||
* 1. Request non-existent key: const result = await cache.get('non-existent');
|
||||
* 2. Expected: result === null
|
||||
*
|
||||
* Test 4: Cache Expiration
|
||||
* ------------------------
|
||||
* 1. Set with short TTL: await cache.set('expire-test', { data: 'test' }, 2);
|
||||
* 2. Immediate get: await cache.get('expire-test') → should return data
|
||||
* 3. Wait 3 seconds
|
||||
* 4. Get again: await cache.get('expire-test') → should return null (expired)
|
||||
*
|
||||
* Test 5: Cache Age Tracking
|
||||
* --------------------------
|
||||
* 1. Set data: await cache.set('age-test', { data: 'test' }, 300);
|
||||
* 2. Wait 5 seconds
|
||||
* 3. Get data: const result = await cache.get('age-test');
|
||||
* 4. Expected: result.cache_age_seconds ≈ 5
|
||||
*
|
||||
* Test 6: Cache Deletion
|
||||
* ----------------------
|
||||
* 1. Set data: await cache.set('delete-test', { data: 'test' }, 300);
|
||||
* 2. Delete: const deleted = await cache.delete('delete-test');
|
||||
* 3. Expected: deleted === true
|
||||
* 4. Get data: const result = await cache.get('delete-test');
|
||||
* 5. Expected: result === null
|
||||
*
|
||||
* Test 7: Error Handling (Graceful Degradation)
|
||||
* ----------------------------------------------
|
||||
* 1. Test with invalid cache response (manual mock required)
|
||||
* 2. Expected: No errors thrown, graceful null return
|
||||
* 3. Verify logs show error message
|
||||
*
|
||||
* Test 8: Integration with Instance API
|
||||
* --------------------------------------
|
||||
* 1. Create cache instance in instance endpoint handler
|
||||
* 2. Generate key from query params: cache.generateKey(query)
|
||||
* 3. Check cache: const cached = await cache.get<InstanceData[]>(key);
|
||||
* 4. If cache hit: return cached.data with cache metadata
|
||||
* 5. If cache miss: fetch from database, cache result, return data
|
||||
* 6. Verify cache hit on second request
|
||||
*
|
||||
* Performance Validation:
|
||||
* -----------------------
|
||||
* 1. Measure database query time (first request)
|
||||
* 2. Measure cache hit time (second request)
|
||||
* 3. Expected: Cache hit 10-50x faster than database query
|
||||
* 4. Verify cache age increases on subsequent requests
|
||||
*
|
||||
* TTL Strategy Validation:
|
||||
* ------------------------
|
||||
* Filtered queries (5 min TTL):
|
||||
* - cache.set(key, data, 300)
|
||||
* - Verify expires after 5 minutes
|
||||
*
|
||||
* Full dataset (1 hour TTL):
|
||||
* - cache.set(key, data, 3600)
|
||||
* - Verify expires after 1 hour
|
||||
*
|
||||
* Post-sync invalidation:
|
||||
* - After sync operation, call cache.delete(key) for all relevant keys
|
||||
* - Verify next request fetches fresh data from database
|
||||
*/
|
||||
|
||||
import { CacheService } from './cache';
|
||||
import type { InstanceData } from '../types';
|
||||
|
||||
/**
|
||||
* Example: Using CacheService in API endpoint
|
||||
*/
|
||||
async function exampleInstanceEndpointWithCache(
|
||||
queryParams: Record<string, unknown>,
|
||||
fetchFromDatabase: () => Promise<InstanceData[]>
|
||||
): Promise<{ data: InstanceData[]; cached?: boolean; cache_age?: number }> {
|
||||
const cache = new CacheService(300); // 5 minutes default TTL
|
||||
|
||||
// Generate cache key from query parameters
|
||||
const cacheKey = cache.generateKey(queryParams);
|
||||
|
||||
// Try to get from cache
|
||||
const cached = await cache.get<InstanceData[]>(cacheKey);
|
||||
|
||||
if (cached) {
|
||||
console.log(`[API] Cache hit (age: ${cached.cache_age_seconds}s)`);
|
||||
return {
|
||||
data: cached.data,
|
||||
cached: true,
|
||||
cache_age: cached.cache_age_seconds,
|
||||
};
|
||||
}
|
||||
|
||||
// Cache miss - fetch from database
|
||||
console.log('[API] Cache miss - fetching from database');
|
||||
const data = await fetchFromDatabase();
|
||||
|
||||
// Determine TTL based on query complexity
|
||||
const hasFilters = Object.keys(queryParams).length > 0;
|
||||
const ttl = hasFilters ? 300 : 3600; // 5 min for filtered, 1 hour for full
|
||||
|
||||
// Store in cache
|
||||
await cache.set(cacheKey, data, ttl);
|
||||
|
||||
return {
|
||||
data,
|
||||
cached: false,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Example: Cache invalidation after sync
|
||||
*/
|
||||
async function exampleCacheInvalidationAfterSync(
|
||||
syncedProviders: string[]
|
||||
): Promise<void> {
|
||||
const cache = new CacheService();
|
||||
|
||||
// Invalidate all instance caches for synced providers
|
||||
for (const provider of syncedProviders) {
|
||||
// Note: Since Cloudflare Workers Cache API doesn't support pattern matching,
|
||||
// you need to maintain a list of active cache keys or use KV for indexing
|
||||
const key = cache.generateKey({ provider });
|
||||
await cache.delete(key);
|
||||
console.log(`[Sync] Invalidated cache for provider: ${provider}`);
|
||||
}
|
||||
|
||||
console.log('[Sync] Cache invalidation complete');
|
||||
}
|
||||
|
||||
export { exampleInstanceEndpointWithCache, exampleCacheInvalidationAfterSync };
|
||||
197
src/services/cache.ts
Normal file
197
src/services/cache.ts
Normal file
@@ -0,0 +1,197 @@
|
||||
/**
|
||||
* Cache Service - Cloudflare Workers Cache API wrapper
|
||||
*
|
||||
* Features:
|
||||
* - Cloudflare Workers Cache API integration (caches.default)
|
||||
* - TTL-based cache expiration with Cache-Control headers
|
||||
* - Cache key generation with sorted parameters
|
||||
* - Cache age tracking and metadata
|
||||
* - Graceful degradation on cache failures
|
||||
*
|
||||
* @example
|
||||
* const cache = new CacheService(300); // 5 minutes default TTL
|
||||
* await cache.set('key', data, 3600); // 1 hour TTL
|
||||
* const result = await cache.get<MyType>('key');
|
||||
* if (result) {
|
||||
* console.log(result.cache_age_seconds);
|
||||
* }
|
||||
*/
|
||||
|
||||
/**
 * Cache result structure with metadata, as returned by CacheService.get().
 */
export interface CacheResult<T> {
  /** Cached data (the payload originally passed to CacheService.set) */
  data: T;
  /** Cache hit indicator (always true for successful cache reads) */
  cached: true;
  /** Age of cached data in seconds, derived from the stored cached_at timestamp */
  cache_age_seconds: number;
  /** ISO 8601 timestamp when data was cached */
  cached_at: string;
}
|
||||
|
||||
/**
|
||||
* CacheService - Manages cache operations using Cloudflare Workers Cache API
|
||||
*/
|
||||
export class CacheService {
|
||||
private cache: Cache;
|
||||
private defaultTTL: number;
|
||||
|
||||
/**
|
||||
* Initialize cache service
|
||||
*
|
||||
* @param ttlSeconds - Default TTL in seconds (default: 300 = 5 minutes)
|
||||
*/
|
||||
constructor(ttlSeconds = 300) {
|
||||
// Use Cloudflare Workers global caches.default
|
||||
this.cache = caches.default;
|
||||
this.defaultTTL = ttlSeconds;
|
||||
console.log(`[CacheService] Initialized with default TTL: ${ttlSeconds}s`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cached data by key
|
||||
*
|
||||
* @param key - Cache key (URL format)
|
||||
* @returns Cached data with metadata, or null if not found/expired
|
||||
*/
|
||||
async get<T>(key: string): Promise<CacheResult<T> | null> {
|
||||
try {
|
||||
const response = await this.cache.match(key);
|
||||
|
||||
if (!response) {
|
||||
console.log(`[CacheService] Cache miss: ${key}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
// Parse response body
|
||||
const body = await response.json() as {
|
||||
data: T;
|
||||
cached_at: string;
|
||||
};
|
||||
|
||||
// Calculate cache age
|
||||
const cachedAt = new Date(body.cached_at);
|
||||
const ageSeconds = Math.floor((Date.now() - cachedAt.getTime()) / 1000);
|
||||
|
||||
console.log(`[CacheService] Cache hit: ${key} (age: ${ageSeconds}s)`);
|
||||
|
||||
return {
|
||||
data: body.data,
|
||||
cached: true,
|
||||
cache_age_seconds: ageSeconds,
|
||||
cached_at: body.cached_at,
|
||||
};
|
||||
|
||||
} catch (error) {
|
||||
console.error('[CacheService] Cache read error:', error);
|
||||
// Graceful degradation: return null on cache errors
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set cached data with TTL
|
||||
*
|
||||
* @param key - Cache key (URL format)
|
||||
* @param data - Data to cache
|
||||
* @param ttlSeconds - TTL in seconds (defaults to defaultTTL)
|
||||
*/
|
||||
async set<T>(key: string, data: T, ttlSeconds?: number): Promise<void> {
|
||||
const ttl = ttlSeconds ?? this.defaultTTL;
|
||||
|
||||
try {
|
||||
// Create response with Cache-Control header for TTL
|
||||
const response = new Response(
|
||||
JSON.stringify({
|
||||
data,
|
||||
cached_at: new Date().toISOString(),
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Cache-Control': `public, max-age=${ttl}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
// Store in cache
|
||||
await this.cache.put(key, response);
|
||||
console.log(`[CacheService] Cached: ${key} (TTL: ${ttl}s)`);
|
||||
|
||||
} catch (error) {
|
||||
console.error('[CacheService] Cache write error:', error);
|
||||
// Graceful degradation: continue without caching
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete cached data by key
|
||||
*
|
||||
* @param key - Cache key (URL format)
|
||||
* @returns true if deleted, false if not found or error
|
||||
*/
|
||||
async delete(key: string): Promise<boolean> {
|
||||
try {
|
||||
const deleted = await this.cache.delete(key);
|
||||
|
||||
if (deleted) {
|
||||
console.log(`[CacheService] Deleted: ${key}`);
|
||||
} else {
|
||||
console.log(`[CacheService] Delete failed (not found): ${key}`);
|
||||
}
|
||||
|
||||
return deleted;
|
||||
|
||||
} catch (error) {
|
||||
console.error('[CacheService] Cache delete error:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate cache key from parameters
|
||||
* Uses URL format with sorted query parameters for consistency
|
||||
*
|
||||
* @param params - Query parameters as key-value pairs
|
||||
* @returns URL-formatted cache key
|
||||
*
|
||||
* @example
|
||||
* generateKey({ provider: 'linode', region: 'us-east' })
|
||||
* // → 'https://cache.internal/instances?provider=linode®ion=us-east'
|
||||
*/
|
||||
generateKey(params: Record<string, unknown>): string {
|
||||
// Sort parameters alphabetically for consistent cache keys
|
||||
const sorted = Object.keys(params)
|
||||
.sort()
|
||||
.map(k => `${k}=${params[k]}`)
|
||||
.join('&');
|
||||
|
||||
return `https://cache.internal/instances?${sorted}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Invalidate all cache entries matching a pattern
|
||||
* Note: Cloudflare Workers Cache API doesn't support pattern matching
|
||||
* This method is for future implementation with KV or custom cache index
|
||||
*
|
||||
* @param pattern - Pattern to match (e.g., 'instances:*')
|
||||
*/
|
||||
async invalidatePattern(pattern: string): Promise<void> {
|
||||
console.warn(`[CacheService] Pattern invalidation not supported: ${pattern}`);
|
||||
// TODO: Implement with KV-based cache index if needed
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cache statistics
|
||||
* Note: Cloudflare Workers Cache API doesn't expose statistics
|
||||
* This is a placeholder for monitoring integration
|
||||
*
|
||||
* @returns Cache statistics (not available in Cloudflare Workers)
|
||||
*/
|
||||
async getStats(): Promise<{ supported: boolean }> {
|
||||
console.warn('[CacheService] Cache statistics not available in Cloudflare Workers');
|
||||
return { supported: false };
|
||||
}
|
||||
}
|
||||
390
src/services/query.ts
Normal file
390
src/services/query.ts
Normal file
@@ -0,0 +1,390 @@
|
||||
/**
|
||||
* Query Service
|
||||
* Handles complex instance queries with JOIN operations, filtering, sorting, and pagination
|
||||
*/
|
||||
|
||||
import {
|
||||
InstanceQueryParams,
|
||||
InstanceResponse,
|
||||
InstanceData,
|
||||
InstanceType,
|
||||
Provider,
|
||||
Region,
|
||||
Pricing,
|
||||
} from '../types';
|
||||
|
||||
/**
 * Raw query result from database (flattened JOIN result)
 *
 * One row per (instance_type, region, pricing) combination produced by the
 * four-way JOIN in QueryService.buildQuery(). Columns from the joined tables
 * carry provider_/region_/pricing_ prefixes to avoid name collisions.
 */
interface RawQueryResult {
  // instance_types fields
  id: number;
  provider_id: number;
  instance_id: string;        // provider's own identifier for the instance type
  instance_name: string;
  vcpu: number;
  memory_mb: number;
  storage_gb: number;
  transfer_tb: number | null;
  network_speed_gbps: number | null;
  gpu_count: number;
  gpu_type: string | null;
  instance_family: string | null;
  metadata: string | null;    // presumably serialized JSON — confirm against schema
  created_at: string;
  updated_at: string;

  // provider fields (aliased)
  provider_name: string;
  provider_display_name: string;
  provider_api_base_url: string | null;
  provider_last_sync_at: string | null;
  provider_sync_status: string;
  provider_sync_error: string | null;
  provider_created_at: string;
  provider_updated_at: string;

  // region fields (aliased)
  region_id: number;
  region_provider_id: number;
  region_code: string;
  region_name: string;
  country_code: string | null;
  latitude: number | null;
  longitude: number | null;
  region_available: number;   // 0/1 flag (SQLite has no boolean type)
  region_created_at: string;
  region_updated_at: string;

  // pricing fields (aliased)
  pricing_id: number;
  pricing_instance_type_id: number;
  pricing_region_id: number;
  hourly_price: number;
  monthly_price: number;
  currency: string;
  pricing_available: number;  // 0/1 flag (SQLite has no boolean type)
  pricing_created_at: string;
  pricing_updated_at: string;
}
|
||||
|
||||
export class QueryService {
|
||||
constructor(private db: D1Database) {}
|
||||
|
||||
/**
|
||||
* Query instances with filtering, sorting, and pagination
|
||||
*/
|
||||
async queryInstances(params: InstanceQueryParams): Promise<InstanceResponse> {
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
// Build SQL query and count query
|
||||
const { sql, countSql, bindings } = this.buildQuery(params);
|
||||
|
||||
console.log('[QueryService] Executing query:', sql);
|
||||
console.log('[QueryService] Bindings:', bindings);
|
||||
|
||||
// Execute count query for total results
|
||||
const countResult = await this.db
|
||||
.prepare(countSql)
|
||||
.bind(...bindings)
|
||||
.first<{ total: number }>();
|
||||
|
||||
const totalResults = countResult?.total ?? 0;
|
||||
|
||||
// Execute main query
|
||||
const result = await this.db
|
||||
.prepare(sql)
|
||||
.bind(...bindings)
|
||||
.all<RawQueryResult>();
|
||||
|
||||
// Transform flat results into structured InstanceData
|
||||
const instances = this.transformResults(result.results);
|
||||
|
||||
// Calculate pagination metadata
|
||||
const page = params.page ?? 1;
|
||||
const perPage = Math.min(params.limit ?? 50, 100); // Max 100
|
||||
const totalPages = Math.ceil(totalResults / perPage);
|
||||
const hasNext = page < totalPages;
|
||||
const hasPrevious = page > 1;
|
||||
|
||||
const queryTime = Date.now() - startTime;
|
||||
|
||||
return {
|
||||
data: instances,
|
||||
pagination: {
|
||||
current_page: page,
|
||||
total_pages: totalPages,
|
||||
per_page: perPage,
|
||||
total_results: totalResults,
|
||||
has_next: hasNext,
|
||||
has_previous: hasPrevious,
|
||||
},
|
||||
meta: {
|
||||
query_time_ms: queryTime,
|
||||
filters_applied: this.extractAppliedFilters(params),
|
||||
},
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('[QueryService] Query failed:', error);
|
||||
throw new Error(`Failed to query instances: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build SQL query with dynamic WHERE clause, ORDER BY, and pagination
|
||||
*/
|
||||
private buildQuery(params: InstanceQueryParams): {
|
||||
sql: string;
|
||||
countSql: string;
|
||||
bindings: unknown[];
|
||||
} {
|
||||
const conditions: string[] = [];
|
||||
const bindings: unknown[] = [];
|
||||
|
||||
// Base SELECT with JOIN
|
||||
const selectClause = `
|
||||
SELECT
|
||||
it.id, it.provider_id, it.instance_id, it.instance_name,
|
||||
it.vcpu, it.memory_mb, it.storage_gb, it.transfer_tb,
|
||||
it.network_speed_gbps, it.gpu_count, it.gpu_type,
|
||||
it.instance_family, it.metadata, it.created_at, it.updated_at,
|
||||
|
||||
p.name as provider_name,
|
||||
p.display_name as provider_display_name,
|
||||
p.api_base_url as provider_api_base_url,
|
||||
p.last_sync_at as provider_last_sync_at,
|
||||
p.sync_status as provider_sync_status,
|
||||
p.sync_error as provider_sync_error,
|
||||
p.created_at as provider_created_at,
|
||||
p.updated_at as provider_updated_at,
|
||||
|
||||
r.id as region_id,
|
||||
r.provider_id as region_provider_id,
|
||||
r.region_code,
|
||||
r.region_name,
|
||||
r.country_code,
|
||||
r.latitude,
|
||||
r.longitude,
|
||||
r.available as region_available,
|
||||
r.created_at as region_created_at,
|
||||
r.updated_at as region_updated_at,
|
||||
|
||||
pr.id as pricing_id,
|
||||
pr.instance_type_id as pricing_instance_type_id,
|
||||
pr.region_id as pricing_region_id,
|
||||
pr.hourly_price,
|
||||
pr.monthly_price,
|
||||
pr.currency,
|
||||
pr.available as pricing_available,
|
||||
pr.created_at as pricing_created_at,
|
||||
pr.updated_at as pricing_updated_at
|
||||
FROM instance_types it
|
||||
JOIN providers p ON it.provider_id = p.id
|
||||
JOIN pricing pr ON pr.instance_type_id = it.id
|
||||
JOIN regions r ON pr.region_id = r.id
|
||||
`;
|
||||
|
||||
// Provider filter (name or ID)
|
||||
if (params.provider && params.provider !== 'all') {
|
||||
conditions.push('p.name = ?');
|
||||
bindings.push(params.provider);
|
||||
}
|
||||
|
||||
// Region filter
|
||||
if (params.region_code) {
|
||||
conditions.push('r.region_code = ?');
|
||||
bindings.push(params.region_code);
|
||||
}
|
||||
|
||||
// Instance family filter
|
||||
if (params.family) {
|
||||
conditions.push('it.instance_family = ?');
|
||||
bindings.push(params.family);
|
||||
}
|
||||
|
||||
// vCPU range filter
|
||||
if (params.min_vcpu !== undefined) {
|
||||
conditions.push('it.vcpu >= ?');
|
||||
bindings.push(params.min_vcpu);
|
||||
}
|
||||
|
||||
if (params.max_vcpu !== undefined) {
|
||||
conditions.push('it.vcpu <= ?');
|
||||
bindings.push(params.max_vcpu);
|
||||
}
|
||||
|
||||
// Memory range filter (parameters in MB)
|
||||
if (params.min_memory !== undefined) {
|
||||
conditions.push('it.memory_mb >= ?');
|
||||
bindings.push(params.min_memory);
|
||||
}
|
||||
|
||||
if (params.max_memory !== undefined) {
|
||||
conditions.push('it.memory_mb <= ?');
|
||||
bindings.push(params.max_memory);
|
||||
}
|
||||
|
||||
// Price range filter (hourly price)
|
||||
if (params.min_price !== undefined) {
|
||||
conditions.push('pr.hourly_price >= ?');
|
||||
bindings.push(params.min_price);
|
||||
}
|
||||
|
||||
if (params.max_price !== undefined) {
|
||||
conditions.push('pr.hourly_price <= ?');
|
||||
bindings.push(params.max_price);
|
||||
}
|
||||
|
||||
// GPU filter
|
||||
if (params.has_gpu !== undefined) {
|
||||
if (params.has_gpu) {
|
||||
conditions.push('it.gpu_count > 0');
|
||||
} else {
|
||||
conditions.push('it.gpu_count = 0');
|
||||
}
|
||||
}
|
||||
|
||||
// Build WHERE clause
|
||||
const whereClause = conditions.length > 0 ? ' WHERE ' + conditions.join(' AND ') : '';
|
||||
|
||||
// Build ORDER BY clause
|
||||
let orderByClause = '';
|
||||
const sortBy = params.sort_by ?? 'hourly_price';
|
||||
const sortOrder = params.sort_order ?? 'asc';
|
||||
|
||||
// Map sort fields to actual column names
|
||||
const sortFieldMap: Record<string, string> = {
|
||||
price: 'pr.hourly_price',
|
||||
hourly_price: 'pr.hourly_price',
|
||||
monthly_price: 'pr.monthly_price',
|
||||
vcpu: 'it.vcpu',
|
||||
memory: 'it.memory_mb',
|
||||
memory_mb: 'it.memory_mb',
|
||||
name: 'it.instance_name',
|
||||
instance_name: 'it.instance_name',
|
||||
};
|
||||
|
||||
const sortColumn = sortFieldMap[sortBy] ?? 'pr.hourly_price';
|
||||
orderByClause = ` ORDER BY ${sortColumn} ${sortOrder.toUpperCase()}`;
|
||||
|
||||
// Build LIMIT and OFFSET
|
||||
const page = params.page ?? 1;
|
||||
const limit = Math.min(params.limit ?? 50, 100); // Max 100
|
||||
const offset = (page - 1) * limit;
|
||||
|
||||
bindings.push(limit);
|
||||
bindings.push(offset);
|
||||
|
||||
const limitClause = ' LIMIT ? OFFSET ?';
|
||||
|
||||
// Complete SQL query
|
||||
const sql = selectClause + whereClause + orderByClause + limitClause;
|
||||
|
||||
// Count query (without ORDER BY and LIMIT)
|
||||
const countSql = `
|
||||
SELECT COUNT(*) as total
|
||||
FROM instance_types it
|
||||
JOIN providers p ON it.provider_id = p.id
|
||||
JOIN pricing pr ON pr.instance_type_id = it.id
|
||||
JOIN regions r ON pr.region_id = r.id
|
||||
${whereClause}
|
||||
`;
|
||||
|
||||
// Bindings for count query (same filters, no limit/offset)
|
||||
const countBindings = bindings.slice(0, -2);
|
||||
|
||||
return { sql, countSql, bindings: countBindings };
|
||||
}
|
||||
|
||||
/**
|
||||
* Transform flat query results into structured InstanceData objects
|
||||
*/
|
||||
private transformResults(results: RawQueryResult[]): InstanceData[] {
|
||||
return results.map((row) => {
|
||||
const provider: Provider = {
|
||||
id: row.provider_id,
|
||||
name: row.provider_name,
|
||||
display_name: row.provider_display_name,
|
||||
api_base_url: row.provider_api_base_url,
|
||||
last_sync_at: row.provider_last_sync_at,
|
||||
sync_status: row.provider_sync_status as 'pending' | 'syncing' | 'success' | 'error',
|
||||
sync_error: row.provider_sync_error,
|
||||
created_at: row.provider_created_at,
|
||||
updated_at: row.provider_updated_at,
|
||||
};
|
||||
|
||||
const region: Region = {
|
||||
id: row.region_id,
|
||||
provider_id: row.region_provider_id,
|
||||
region_code: row.region_code,
|
||||
region_name: row.region_name,
|
||||
country_code: row.country_code,
|
||||
latitude: row.latitude,
|
||||
longitude: row.longitude,
|
||||
available: row.region_available,
|
||||
created_at: row.region_created_at,
|
||||
updated_at: row.region_updated_at,
|
||||
};
|
||||
|
||||
const pricing: Pricing = {
|
||||
id: row.pricing_id,
|
||||
instance_type_id: row.pricing_instance_type_id,
|
||||
region_id: row.pricing_region_id,
|
||||
hourly_price: row.hourly_price,
|
||||
monthly_price: row.monthly_price,
|
||||
currency: row.currency,
|
||||
available: row.pricing_available,
|
||||
created_at: row.pricing_created_at,
|
||||
updated_at: row.pricing_updated_at,
|
||||
};
|
||||
|
||||
const instanceType: InstanceType = {
|
||||
id: row.id,
|
||||
provider_id: row.provider_id,
|
||||
instance_id: row.instance_id,
|
||||
instance_name: row.instance_name,
|
||||
vcpu: row.vcpu,
|
||||
memory_mb: row.memory_mb,
|
||||
storage_gb: row.storage_gb,
|
||||
transfer_tb: row.transfer_tb,
|
||||
network_speed_gbps: row.network_speed_gbps,
|
||||
gpu_count: row.gpu_count,
|
||||
gpu_type: row.gpu_type,
|
||||
instance_family: row.instance_family as any,
|
||||
metadata: row.metadata,
|
||||
created_at: row.created_at,
|
||||
updated_at: row.updated_at,
|
||||
};
|
||||
|
||||
return {
|
||||
...instanceType,
|
||||
provider,
|
||||
region,
|
||||
pricing,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract applied filters for metadata response
|
||||
*/
|
||||
private extractAppliedFilters(params: InstanceQueryParams): Partial<InstanceQueryParams> {
|
||||
const filters: Partial<InstanceQueryParams> = {};
|
||||
|
||||
if (params.provider) filters.provider = params.provider;
|
||||
if (params.region_code) filters.region_code = params.region_code;
|
||||
if (params.family) filters.family = params.family;
|
||||
if (params.min_vcpu !== undefined) filters.min_vcpu = params.min_vcpu;
|
||||
if (params.max_vcpu !== undefined) filters.max_vcpu = params.max_vcpu;
|
||||
if (params.min_memory !== undefined) filters.min_memory = params.min_memory;
|
||||
if (params.max_memory !== undefined) filters.max_memory = params.max_memory;
|
||||
if (params.min_price !== undefined) filters.min_price = params.min_price;
|
||||
if (params.max_price !== undefined) filters.max_price = params.max_price;
|
||||
if (params.has_gpu !== undefined) filters.has_gpu = params.has_gpu;
|
||||
if (params.sort_by) filters.sort_by = params.sort_by;
|
||||
if (params.sort_order) filters.sort_order = params.sort_order;
|
||||
|
||||
return filters;
|
||||
}
|
||||
}
|
||||
362
src/services/sync.ts
Normal file
362
src/services/sync.ts
Normal file
@@ -0,0 +1,362 @@
|
||||
/**
|
||||
* Sync Service - Orchestrates synchronization of cloud provider data
|
||||
*
|
||||
* Features:
|
||||
* - Multi-provider synchronization (Linode, Vultr, AWS)
|
||||
* - Stage-based sync process with error recovery
|
||||
* - Provider status tracking and reporting
|
||||
* - Batch operations for efficiency
|
||||
*
|
||||
* @example
|
||||
* const orchestrator = new SyncOrchestrator(db, vault);
|
||||
* const report = await orchestrator.syncAll(['linode']);
|
||||
*/
|
||||
|
||||
import { VaultClient } from '../connectors/vault';
|
||||
import { LinodeConnector } from '../connectors/linode';
|
||||
import { VultrConnector } from '../connectors/vultr';
|
||||
import { AWSConnector } from '../connectors/aws';
|
||||
import { RepositoryFactory } from '../repositories';
|
||||
import type {
|
||||
ProviderSyncResult,
|
||||
SyncReport,
|
||||
RegionInput,
|
||||
InstanceTypeInput,
|
||||
PricingInput,
|
||||
} from '../types';
|
||||
|
||||
/**
 * Synchronization stages, in the order syncProvider() executes them.
 * The current stage is tracked so a failure can report exactly where
 * the pipeline broke (see ProviderSyncResult.error_details.stage).
 */
export enum SyncStage {
  INIT = 'init',
  FETCH_CREDENTIALS = 'fetch_credentials',
  FETCH_REGIONS = 'fetch_regions',
  FETCH_INSTANCES = 'fetch_instances',
  NORMALIZE = 'normalize',
  PERSIST = 'persist',
  VALIDATE = 'validate',
  COMPLETE = 'complete',
}
|
||||
|
||||
/**
 * Cloud provider connector interface.
 * All provider connectors (Linode, Vultr, AWS) must implement this interface
 * so SyncOrchestrator can drive them uniformly.
 */
export interface CloudConnector {
  /** Authenticate and validate credentials */
  authenticate(): Promise<void>;

  /** Fetch all available regions from the provider API */
  getRegions(): Promise<RegionInput[]>;

  /** Fetch all instance types from the provider API */
  getInstanceTypes(): Promise<InstanceTypeInput[]>;

  /** Fetch pricing data for the given database instance-type and region IDs */
  getPricing(instanceTypeIds: number[], regionIds: number[]): Promise<PricingInput[]>;
}
|
||||
|
||||
/**
 * Sync orchestrator for managing provider synchronization
 */
export class SyncOrchestrator {
  // Repository factory wrapping the D1 database (providers/regions/instances/pricing)
  private repos: RepositoryFactory;

  /**
   * @param db - D1 database handle used to construct the repository factory
   * @param vault - Vault client; presumably used by createConnector to fetch
   *                provider API credentials — confirm (not visible here)
   */
  constructor(
    db: D1Database,
    private vault: VaultClient
  ) {
    this.repos = new RepositoryFactory(db);
    console.log('[SyncOrchestrator] Initialized');
  }
|
||||
|
||||
  /**
   * Synchronize a single provider through the staged pipeline:
   * init → fetch credentials → fetch regions → fetch instances →
   * normalize → persist → validate → complete.
   *
   * Never throws: any failure is caught, the provider's sync status is set
   * to 'error', and a failure result (including the stage reached) is
   * returned instead.
   *
   * @param provider - Provider name (linode, vultr, aws)
   * @returns Sync result with statistics and error information
   */
  async syncProvider(provider: string): Promise<ProviderSyncResult> {
    const startTime = Date.now();
    // Tracks the current stage so the catch block can report where we failed
    let stage = SyncStage.INIT;

    console.log(`[SyncOrchestrator] Starting sync for provider: ${provider}`);

    try {
      // Stage 1: Initialize - Update provider status to syncing
      stage = SyncStage.INIT;
      await this.repos.providers.updateSyncStatus(provider, 'syncing');
      console.log(`[SyncOrchestrator] ${provider} → ${stage}`);

      // Stage 2: Fetch credentials from Vault and validate them
      stage = SyncStage.FETCH_CREDENTIALS;
      const connector = await this.createConnector(provider);
      await connector.authenticate();
      console.log(`[SyncOrchestrator] ${provider} → ${stage}`);

      // Get provider record (needed for provider_id on normalized rows)
      const providerRecord = await this.repos.providers.findByName(provider);
      if (!providerRecord) {
        throw new Error(`Provider not found in database: ${provider}`);
      }

      // Stage 3: Fetch regions from provider API
      stage = SyncStage.FETCH_REGIONS;
      const regions = await connector.getRegions();
      console.log(`[SyncOrchestrator] ${provider} → ${stage} (${regions.length} regions)`);

      // Stage 4: Fetch instance types from provider API
      stage = SyncStage.FETCH_INSTANCES;
      const instances = await connector.getInstanceTypes();
      console.log(`[SyncOrchestrator] ${provider} → ${stage} (${instances.length} instances)`);

      // Stage 5: Normalize data (attach this provider's database id)
      stage = SyncStage.NORMALIZE;
      const normalizedRegions = regions.map(r => ({
        ...r,
        provider_id: providerRecord.id,
      }));
      const normalizedInstances = instances.map(i => ({
        ...i,
        provider_id: providerRecord.id,
      }));
      console.log(`[SyncOrchestrator] ${provider} → ${stage}`);

      // Stage 6: Persist regions and instances to database
      stage = SyncStage.PERSIST;
      const regionsCount = await this.repos.regions.upsertMany(
        providerRecord.id,
        normalizedRegions
      );
      const instancesCount = await this.repos.instances.upsertMany(
        providerRecord.id,
        normalizedInstances
      );

      // Fetch pricing data - needs the database-assigned instance/region IDs,
      // so re-read both sets after the upserts above
      const dbRegions = await this.repos.regions.findByProvider(providerRecord.id);
      const dbInstances = await this.repos.instances.findByProvider(providerRecord.id);

      const regionIds = dbRegions.map(r => r.id);
      const instanceTypeIds = dbInstances.map(i => i.id);

      const pricing = await connector.getPricing(instanceTypeIds, regionIds);
      const pricingCount = await this.repos.pricing.upsertMany(pricing);

      console.log(`[SyncOrchestrator] ${provider} → ${stage} (regions: ${regionsCount}, instances: ${instancesCount}, pricing: ${pricingCount})`);

      // Stage 7: Validate - an empty sync is treated as a failure
      stage = SyncStage.VALIDATE;
      if (regionsCount === 0 || instancesCount === 0) {
        throw new Error('No data was synced - possible API or parsing issue');
      }
      console.log(`[SyncOrchestrator] ${provider} → ${stage}`);

      // Stage 8: Complete - Update provider status to success
      stage = SyncStage.COMPLETE;
      await this.repos.providers.updateSyncStatus(provider, 'success');

      const duration = Date.now() - startTime;
      console.log(`[SyncOrchestrator] ${provider} → ${stage} (${duration}ms)`);

      return {
        provider,
        success: true,
        regions_synced: regionsCount,
        instances_synced: instancesCount,
        pricing_synced: pricingCount,
        duration_ms: duration,
      };

    } catch (error) {
      const duration = Date.now() - startTime;
      const errorMessage = error instanceof Error ? error.message : 'Unknown error';

      console.error(`[SyncOrchestrator] ${provider} failed at ${stage}:`, error);

      // Update provider status to error; best-effort — a failure here must
      // not mask the original sync error
      try {
        await this.repos.providers.updateSyncStatus(provider, 'error', errorMessage);
      } catch (statusError) {
        console.error(`[SyncOrchestrator] Failed to update provider status:`, statusError);
      }

      // Failure result: zero counts, plus the stage reached for diagnostics
      return {
        provider,
        success: false,
        regions_synced: 0,
        instances_synced: 0,
        pricing_synced: 0,
        duration_ms: duration,
        error: errorMessage,
        error_details: {
          stage,
          message: errorMessage,
          stack: error instanceof Error ? error.stack : undefined,
        },
      };
    }
  }
|
||||
|
||||
/**
|
||||
* Synchronize all providers
|
||||
* Runs synchronizations in parallel for efficiency
|
||||
*
|
||||
* @param providers - Array of provider names to sync (defaults to all supported providers)
|
||||
* @returns Complete sync report with statistics
|
||||
*/
|
||||
async syncAll(providers = ['linode', 'vultr', 'aws']): Promise<SyncReport> {
|
||||
const startedAt = new Date().toISOString();
|
||||
const startTime = Date.now();
|
||||
|
||||
console.log(`[SyncOrchestrator] Starting sync for providers: ${providers.join(', ')}`);
|
||||
|
||||
// Run all provider syncs in parallel
|
||||
const results = await Promise.allSettled(
|
||||
providers.map(p => this.syncProvider(p))
|
||||
);
|
||||
|
||||
// Extract results
|
||||
const providerResults: ProviderSyncResult[] = results.map((result, index) => {
|
||||
if (result.status === 'fulfilled') {
|
||||
return result.value;
|
||||
} else {
|
||||
// Handle rejected promises
|
||||
const provider = providers[index];
|
||||
const errorMessage = result.reason instanceof Error
|
||||
? result.reason.message
|
||||
: 'Unknown error';
|
||||
|
||||
console.error(`[SyncOrchestrator] ${provider} promise rejected:`, result.reason);
|
||||
|
||||
return {
|
||||
provider,
|
||||
success: false,
|
||||
regions_synced: 0,
|
||||
instances_synced: 0,
|
||||
pricing_synced: 0,
|
||||
duration_ms: 0,
|
||||
error: errorMessage,
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
const completedAt = new Date().toISOString();
|
||||
const totalDuration = Date.now() - startTime;
|
||||
|
||||
// Calculate summary
|
||||
const successful = providerResults.filter(r => r.success);
|
||||
const failed = providerResults.filter(r => !r.success);
|
||||
|
||||
const summary = {
|
||||
total_providers: providers.length,
|
||||
successful_providers: successful.length,
|
||||
failed_providers: failed.length,
|
||||
total_regions: providerResults.reduce((sum, r) => sum + r.regions_synced, 0),
|
||||
total_instances: providerResults.reduce((sum, r) => sum + r.instances_synced, 0),
|
||||
total_pricing: providerResults.reduce((sum, r) => sum + r.pricing_synced, 0),
|
||||
};
|
||||
|
||||
const report: SyncReport = {
|
||||
success: failed.length === 0,
|
||||
started_at: startedAt,
|
||||
completed_at: completedAt,
|
||||
total_duration_ms: totalDuration,
|
||||
providers: providerResults,
|
||||
summary,
|
||||
};
|
||||
|
||||
console.log(`[SyncOrchestrator] Sync complete:`, {
|
||||
total: summary.total_providers,
|
||||
success: summary.successful_providers,
|
||||
failed: summary.failed_providers,
|
||||
duration: `${totalDuration}ms`,
|
||||
});
|
||||
|
||||
return report;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create connector for a specific provider
|
||||
*
|
||||
* @param provider - Provider name
|
||||
* @returns Connector instance for the provider
|
||||
* @throws Error if provider is not supported
|
||||
*/
|
||||
private async createConnector(provider: string): Promise<CloudConnector> {
|
||||
switch (provider.toLowerCase()) {
|
||||
case 'linode': {
|
||||
const connector = new LinodeConnector(this.vault);
|
||||
return {
|
||||
authenticate: () => connector.initialize(),
|
||||
getRegions: async () => {
|
||||
const regions = await connector.fetchRegions();
|
||||
const providerRecord = await this.repos.providers.findByName('linode');
|
||||
const providerId = providerRecord?.id ?? 0;
|
||||
return regions.map(r => connector.normalizeRegion(r, providerId));
|
||||
},
|
||||
getInstanceTypes: async () => {
|
||||
const instances = await connector.fetchInstanceTypes();
|
||||
const providerRecord = await this.repos.providers.findByName('linode');
|
||||
const providerId = providerRecord?.id ?? 0;
|
||||
return instances.map(i => connector.normalizeInstance(i, providerId));
|
||||
},
|
||||
getPricing: async () => {
|
||||
// Linode pricing is included in instance types
|
||||
return [];
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
case 'vultr': {
|
||||
const connector = new VultrConnector(this.vault);
|
||||
return {
|
||||
authenticate: () => connector.initialize(),
|
||||
getRegions: async () => {
|
||||
const regions = await connector.fetchRegions();
|
||||
const providerRecord = await this.repos.providers.findByName('vultr');
|
||||
const providerId = providerRecord?.id ?? 0;
|
||||
return regions.map(r => connector.normalizeRegion(r, providerId));
|
||||
},
|
||||
getInstanceTypes: async () => {
|
||||
const plans = await connector.fetchPlans();
|
||||
const providerRecord = await this.repos.providers.findByName('vultr');
|
||||
const providerId = providerRecord?.id ?? 0;
|
||||
return plans.map(p => connector.normalizeInstance(p, providerId));
|
||||
},
|
||||
getPricing: async () => {
|
||||
// Vultr pricing is included in plans
|
||||
return [];
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
case 'aws': {
|
||||
const connector = new AWSConnector(this.vault);
|
||||
return {
|
||||
authenticate: () => connector.initialize(),
|
||||
getRegions: async () => {
|
||||
const regions = await connector.fetchRegions();
|
||||
const providerRecord = await this.repos.providers.findByName('aws');
|
||||
const providerId = providerRecord?.id ?? 0;
|
||||
return regions.map(r => connector.normalizeRegion(r, providerId));
|
||||
},
|
||||
getInstanceTypes: async () => {
|
||||
const instances = await connector.fetchInstanceTypes();
|
||||
const providerRecord = await this.repos.providers.findByName('aws');
|
||||
const providerId = providerRecord?.id ?? 0;
|
||||
return instances.map(i => connector.normalizeInstance(i, providerId));
|
||||
},
|
||||
getPricing: async () => {
|
||||
// AWS pricing is included in instance types from ec2.shop
|
||||
return [];
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
default:
|
||||
throw new Error(`Unsupported provider: ${provider}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user