main.ts
import createHttpClient from '@/api/http/client'
import getProjectData from '@/api/http/get-project-data'
import createWsClient from '@/api/websocket/client'
import pushDeploymentLogs from '@/push-deployment-logs'
import pushPluginLogs from '@/push-plugin-logs'
import { App, HttpClient, VectorProcess, WsClient } from '@/types'
import getEnv from '@/utils/get-env'
import parseProjectIds from '@/utils/parse-project-ids'
import requireEnv from '@/utils/require-env'
import spawn from '@/vector/spawn'
import write from '@/vector/write'
import dotenv from 'dotenv'
import configureVector from './vector/configure'

dotenv.config()
const RAILWAY_API_HTTP_ENDPOINT = getEnv(
  'RAILWAY_API_HTTP_ENDPOINT',
  'https://backboard.railway.app/graphql/v2',
)
const RAILWAY_API_WS_ENDPOINT = getEnv(
  'RAILWAY_API_WS_ENDPOINT',
  'wss://backboard.railway.app/graphql/v2',
)
const RAILWAY_PROJECT_IDS = requireEnv('RAILWAY_PROJECT_IDS')
const RAILWAY_API_TOKEN = requireEnv('RAILWAY_API_TOKEN')
const VECTOR_BIN_PATH = requireEnv('VECTOR_BIN_PATH')

// @TODO: Move this to env?
const REFRESH_INTERVAL_SECONDS = 60 * 15
/**
 * This is the main event loop that refreshes a project's deployments and
 * plugins every n seconds (where n=`REFRESH_INTERVAL_SECONDS`) and pushes
 * the logs of each deployment/plugin into Vector.
 *
 * The major limitation of this approach is Railway's rate limit: each
 * account is limited to 1k requests per day [0]. At the current default
 * refresh interval of 15 minutes, that works out to 96 project-data requests
 * per project every 24 hours. I'm not sure whether the WS subscriptions are
 * subject to this rate limit. In theory, rate limits should apply per
 * request rather than per WS message, which implies each WS connection
 * counts toward the rate limit. This effectively means that we're making
 * `(1 + (services + plugins)) * (86400 / REFRESH_INTERVAL_SECONDS)` requests
 * every 24 hours for each project.
 *
 * An alternative (and better) way of doing this is through Railway's
 * webhooks, which would let us subscribe to new deployments. However, that
 * approach misses out on plugin creation/deletion: when a plugin is created,
 * its logs will only get pushed after restarting this service. Conversely,
 * when a plugin is deleted, we'll have no way of knowing (would subscribing
 * to the logs of a deleted plugin error out?).
 *
 * [0] https://docs.railway.app/reference/public-api#rate-limits
 */
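// A quick worked example of the request math above, using illustrative
// numbers (the service/plugin counts are hypothetical, not from this repo):
// at the default REFRESH_INTERVAL_SECONDS = 900, a project with 3 services
// and 2 plugins costs (1 + (3 + 2)) * (86400 / 900) = 6 * 96 = 576 requests
// per 24 hours, so two such projects would already exceed the 1k/day budget.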
const main = async () => {
  const ENVIRONMENT = process.env.ENVIRONMENT ?? 'development'

  console.info(`⚡ railway-chord is starting!`)
  console.info(`⚡ environment: ${ENVIRONMENT}`)

  // Vector sinks are configured dynamically based on the presence of a sink's
  // API token in env, i.e. if a LOGTAIL_TOKEN is provided, inject the Logtail
  // sink into the Vector config; if a DATADOG_TOKEN is provided, inject the
  // Datadog sink; and so on.
  const LOGTAIL_TOKEN = process.env.LOGTAIL_TOKEN ?? null
  const DATADOG_TOKEN = process.env.DATADOG_TOKEN ?? null
  const DATADOG_SITE = process.env.DATADOG_SITE ?? null
  const vectorCfg = configureVector(
    ENVIRONMENT !== 'production',
    LOGTAIL_TOKEN,
    DATADOG_TOKEN,
    DATADOG_SITE,
  )

  // Start Vector first. We want to crash early; there's no point in making
  // network requests to the Railway API if Vector can't start.
  console.info(`⚙️ Using Vector binary: ${VECTOR_BIN_PATH}`)
  const vector = spawn(VECTOR_BIN_PATH, vectorCfg.contents)
  write(vector, '>>> ping from railway-chord')
  console.info(`✅ Vector started`)
  console.info(`✅ Enabled sinks:`)
  vectorCfg.enabled.forEach((s) => {
    console.info(` - ${s}`)
  })

  console.info(`⚙️ Using Railway HTTP endpoint: ${RAILWAY_API_HTTP_ENDPOINT}`)
  console.info(`⚙️ Using Railway WS endpoint: ${RAILWAY_API_WS_ENDPOINT}`)

  const projectIds = parseProjectIds(RAILWAY_PROJECT_IDS)
  const httpClient = createHttpClient(
    RAILWAY_API_HTTP_ENDPOINT,
    RAILWAY_API_TOKEN,
  )
  const wsClient = createWsClient(RAILWAY_API_WS_ENDPOINT, RAILWAY_API_TOKEN)

  // Start the event loop: run once immediately, then re-run every
  // REFRESH_INTERVAL_SECONDS.
  await runEventLoop(httpClient, wsClient, vector, projectIds)
  setInterval(async () => {
    await runEventLoop(httpClient, wsClient, vector, projectIds)
  }, REFRESH_INTERVAL_SECONDS * 1000)
}
/**
 * Fetches the current deployments and plugins for every configured project.
 */
const refreshProjects = async (
  httpClient: HttpClient,
  projectIds: App.ProjectId[],
) => {
  return await Promise.all(
    projectIds.map(async (id) => {
      const project = await getProjectData(httpClient, id)
      return {
        projectId: id,
        plugins: project.plugins,
        deployments: project.deployments,
      }
    }),
  )
}
/**
 * Refreshes project data, then opens a log subscription for each deployment
 * and plugin found and pipes those logs into Vector.
 */
const runEventLoop = async (
  httpClient: HttpClient,
  wsClient: WsClient,
  vector: VectorProcess,
  projectIds: App.ProjectId[],
) => {
  console.info(`🔄 Refreshing projects!`)
  const projects = await refreshProjects(httpClient, projectIds)

  console.info(`✅ Enabling for:`)
  projects.forEach(async ({ deployments, plugins, projectId }) => {
    console.info(` > projectId=${projectId}`)
    deployments.forEach(async (d) => {
      console.info(` - deployment=${d.staticUrl}, deploymentId=${d.id}`)
      pushDeploymentLogs(wsClient, vector, d, new Date())
    })
    plugins.forEach(async (p) => {
      console.info(
        ` - plugin=${p.name}, pluginId=${p.id}, env=${p.environmentName}`,
      )
      pushPluginLogs(wsClient, vector, p, new Date())
    })
  })
}
main()
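// A minimal example environment for running this entrypoint. The values and
// the comma-separated RAILWAY_PROJECT_IDS format are assumptions for
// illustration (check parseProjectIds for the exact format); only the first
// three variables are required above, the rest are optional:
//
//   RAILWAY_PROJECT_IDS=<project-id-1>,<project-id-2>
//   RAILWAY_API_TOKEN=<railway-api-token>
//   VECTOR_BIN_PATH=/usr/local/bin/vector
//   ENVIRONMENT=production
//   LOGTAIL_TOKEN=<logtail-source-token>   # enables the Logtail sink
//   DATADOG_TOKEN=<datadog-api-key>        # enables the Datadog sink
//   DATADOG_SITE=<datadog-site>            # e.g. datadoghq.com
//   RAILWAY_API_HTTP_ENDPOINT=<override>   # defaults to backboard.railway.app
//   RAILWAY_API_WS_ENDPOINT=<override>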