Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[DRAFT] SHARD-1166: Add archiver whitelisting #362

Open
wants to merge 1 commit into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/config/server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,7 @@ const SERVER_CONFIG: StrictServerConfiguration = {
profiler: false,
minMultiSigRequiredForEndpoints: 2,
minMultiSigRequiredForGlobalTxs: 2,
minMultiSigRequiredForArchiverConfig: 2,
robustQueryDebug: false,
forwardTXToSyncingNeighbors: false,
recordAcceptedTx: false,
Expand Down
154 changes: 152 additions & 2 deletions src/p2p/Archivers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import { Result, ResultAsync } from 'neverthrow'
import { Utils } from '@shardus/types'
import { arch } from 'os'
import { checkGossipPayload } from '../utils/GossipValidation'
import { DevSecurityLevel } from '../shardus/shardus-types'

const clone = rfdc()

Expand All @@ -49,6 +50,8 @@ export let recipients: Map<
P2P.ArchiversTypes.DataRecipient | any
>

let allowedArchivers: Array<{ ip: string, port: number, publicKey: string }> = []
let allowedArchiversInterval: NodeJS.Timeout | null = null
let joinRequests: P2P.ArchiversTypes.Request[]
let leaveRequests: P2P.ArchiversTypes.Request[]
let receiptForwardInterval: Timeout | null = null
Expand All @@ -58,6 +61,7 @@ export let connectedSockets = {}
let lastSentCycle = -1
let lastTimeForwardedArchivers = []
export const RECEIPT_FORWARD_INTERVAL_MS = 5000
const ALLOWED_ARCHIVERS_UPDATE_INTERVAL_MS = 10000
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we can change this to every minute


export enum DataRequestTypes {
SUBSCRIBE = 'SUBSCRIBE',
Expand Down Expand Up @@ -101,6 +105,20 @@ export function init() {
reset()
resetLeaveRequests()
registerRoutes()
getAllowedArchivers().then(archivers => {
allowedArchivers = archivers
})

// Set up interval to refetch allowed archivers every ALLOWED_ARCHIVERS_UPDATE_INTERVAL_MS (10 seconds)
allowedArchiversInterval = setInterval(async () => {
try {
allowedArchivers = await getAllowedArchivers()
} catch (err) {
if (logFlags.important_as_fatal) {
console.error('Periodic update: failed to update allowedArchivers:', err)
}
}
}, ALLOWED_ARCHIVERS_UPDATE_INTERVAL_MS) // 10 seconds

if (config.p2p.experimentalSnapshot && !receiptForwardInterval) {
receiptForwardInterval = setInterval(forwardReceipts, RECEIPT_FORWARD_INTERVAL_MS)
Expand All @@ -126,6 +144,124 @@ export function init() {
}
}

// Shape of one entry in the archiver whitelist served by /allowed-archivers.
type AllowedArchiver = {
  ip: string
  port: number
  publicKey: string
}

// Monotonically increasing counter from the last successfully verified
// /allowed-archivers response; used to reject stale or replayed payloads.
let lastSeenCounter = 0

/**
 * Fetch and multisig-verify the archiver whitelist from one archiver's
 * `/allowed-archivers` endpoint.
 *
 * The response must carry a `counter` strictly greater than `lastSeenCounter`
 * and at least `minMultiSigRequiredForArchiverConfig` valid signatures over
 * `{ allowedArchivers, counter }` from keys in `config.debug.multisigKeys`.
 * `lastSeenCounter` is advanced only after verification succeeds.
 *
 * @param ip   host of the archiver to query
 * @param port port of the archiver to query
 * @returns the verified, non-empty whitelist, or null when the fetch,
 *          counter check, or signature verification fails
 */
async function fetchVerifiedAllowedArchivers(ip: string, port: number): Promise<AllowedArchiver[] | null> {
  try {
    const response = await fetch(`http://${ip}:${port}/allowed-archivers`)
    if (!response.ok) {
      return null
    }

    const data = (await response.json()) as {
      allowedArchivers: AllowedArchiver[]
      signatures: Array<{
        owner: string
        sig: string
      }>
      counter: number
    }

    // Reject stale or replayed configs: the counter must strictly increase.
    if (data.counter <= lastSeenCounter) {
      p2pLogger.error('Invalid counter in allowed-archivers response - counter must be increasing')
      return null
    }

    // The signed payload covers both the list and the counter so neither can
    // be tampered with independently.
    const verifyRawPayload = {
      allowedArchivers: data.allowedArchivers,
      counter: data.counter,
    }

    const isValid = Context.stateManager.app.verifyMultiSigs(
      verifyRawPayload,
      data.signatures,
      config.debug.multisigKeys,
      config.debug.minMultiSigRequiredForArchiverConfig,
      DevSecurityLevel.High
    )
    if (!isValid) {
      p2pLogger.error('Invalid signatures in allowed-archivers response')
      return null
    }

    // Advance the replay guard only after successful verification.
    lastSeenCounter = data.counter

    if (data.allowedArchivers && data.allowedArchivers.length > 0) {
      p2pLogger.info(`Found ${data.allowedArchivers.length} whitelisted archivers from ${ip}:${port}`)
      return data.allowedArchivers
    }
    return null
  } catch (err) {
    // Include the error so fetch/parse failures are diagnosable from the log.
    p2pLogger.warn(`Failed to get allowed archivers from ${ip}:${port}: ${err}`)
    return null
  }
}

/**
 * Resolve the current archiver whitelist.
 *
 * When at least one archiver is already known, a single random known archiver
 * is queried. During bootstrap (no archivers yet) the seed archivers from
 * `config.p2p.existingArchivers` are tried in order until one returns a
 * verifiable, non-empty whitelist.
 *
 * Fix vs. original: both paths now verify the same signed payload
 * `{ allowedArchivers, counter }` and enforce counter monotonicity. The
 * bootstrap path previously verified only `{ allowedArchivers }` — which
 * cannot match signatures produced over the counter-bearing payload served by
 * the same endpoint — and never updated the replay guard. The bootstrap loop
 * also keeps trying the remaining seeds on an invalid response instead of
 * aborting, so one misbehaving seed cannot block startup.
 *
 * @returns the verified whitelist, or [] when none could be obtained
 */
async function getAllowedArchivers(): Promise<AllowedArchiver[]> {
  if (archivers.size > 0) {
    // Get the config from a random existing archiver
    const randomArchiver = getRandomArchiver()
    if (!randomArchiver) return []
    return (await fetchVerifiedAllowedArchivers(randomArchiver.ip, randomArchiver.port)) ?? []
  }

  // Bootstrap: no archivers have joined yet, so fall back to the seed
  // archivers from config.
  for (const archiver of config.p2p.existingArchivers) {
    const result = await fetchVerifiedAllowedArchivers(archiver.ip, archiver.port)
    if (result !== null) return result
  }
  return []
}

/**
 * Reset per-cycle archiver state. Currently clears only the pending archiver
 * join requests (via resetJoinRequests); leave requests are reset separately
 * by resetLeaveRequests.
 */
export function reset() {
  resetJoinRequests()
}
Expand Down Expand Up @@ -169,6 +305,14 @@ export function updateRecord(txs: P2P.ArchiversTypes.Txs, record: P2P.CycleCreat
.filter((request) => request.requestType === P2P.ArchiversTypes.RequestTypes.LEAVE)
.map((leaveRequest) => leaveRequest.nodeInfo)

// Remove archivers that are not in the allowed list
for (const existingArchiver of archivers.values()) {
if (allowedArchivers.length > 0 && !allowedArchivers.some(allowed => allowed.publicKey === existingArchiver.publicKey) &&
!leavingArchivers.some(leaving => leaving.publicKey === existingArchiver.publicKey)) {
leavingArchivers.push(existingArchiver)
}
}

if (logFlags.console)
console.log(
`Archiver before updating record: Cycle ${CycleCreator.currentCycle}, Quarter: ${CycleCreator.currentQuarter}`,
Expand Down Expand Up @@ -269,6 +413,12 @@ export function addArchiverJoinRequest(joinRequest: P2P.ArchiversTypes.Request,
}
}

const isPublicKeyWhitelisted = allowedArchivers.some(archiver => archiver.publicKey === joinRequest.nodeInfo.publicKey);
if (!isPublicKeyWhitelisted && archivers.size > 0) {
warn('addJoinRequest: Archiver not found in the allowed list')
return { success: false, reason: 'Archiver not found in the allowed list' }
}

if (archivers.size > 0) {
// Check the archiver version from dapp
if (Context.config.p2p.validateArchiverAppData) {
Expand Down Expand Up @@ -1083,8 +1233,8 @@ export function registerRoutes() {
delete queryRequest.tag
let data: {
[key: number]:
| StateManager.StateManagerTypes.ReceiptMapResult[]
| StateManager.StateManagerTypes.StatsClump
| StateManager.StateManagerTypes.ReceiptMapResult[]
| StateManager.StateManagerTypes.StatsClump
}
if (queryRequest.type === 'RECEIPT_MAP') {
data = getReceiptMap(queryRequest.lastData)
Expand Down
6 changes: 4 additions & 2 deletions src/shardus/shardus-types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -923,7 +923,7 @@ export interface ServerConfiguration {
/** not an actual percent but 0-1 value or multiplication */
rotationMaxRemovePercent: number
/** enable sync floor */
syncFloorEnabled: boolean
syncFloorEnabled: boolean
/** additional support for more syncing nodes. not an actual percent but 0-1 value or multiplication */
syncingMaxAddPercent: number
/** how many node should be syncing at any given time */
Expand All @@ -933,7 +933,7 @@ export interface ServerConfiguration {
/** The max number of nodes added to `activated` list in cycleRecord each cycle */
allowActivePerCycleRecover: number
/** enable active node rotation recovery */
activeRecoveryEnabled: boolean
activeRecoveryEnabled: boolean
/** should a checking node use a random proxy to run the down test */
useProxyForDownCheck: boolean
/** The number of checker nodes to ask to investigate whether a node that is potentially lost */
Expand Down Expand Up @@ -1082,6 +1082,8 @@ export interface ServerConfiguration {
minMultiSigRequiredForEndpoints: number
/** minimum approvals needed for global txs using multisig */
minMultiSigRequiredForGlobalTxs: number
/** minimum approvals needed for archiver config using multisig */
minMultiSigRequiredForArchiverConfig: number
/** dump extra data for robust query even if in error/fatal logggin only mode */
robustQueryDebug: boolean
/** pretty sure we don't want this ever but making a config so we can AB test as needed */
Expand Down
Loading