Remove refute #347

Draft · wants to merge 6 commits into base: 1.15.4
1 change: 1 addition & 0 deletions src/config/deprecated/server.json
@@ -39,6 +39,7 @@
"maxJoinedPerCycle": 1,
"maxSyncingPerCycle": 5,
"maxRotatedPerCycle": 1,
"maxProblematicNodeRemovalsPerCycle": 1,
"firstCycleJoin": 10,
"maxPercentOfDelta": 40,
"minScaleReqsNeeded": 5,
1 change: 1 addition & 0 deletions src/config/server.ts
@@ -69,6 +69,7 @@ const SERVER_CONFIG: StrictServerConfiguration = {
maxSyncTimeFloor: 1200,
maxNodeForSyncTime: 9,
maxRotatedPerCycle: 1,
maxProblematicNodeRemovalsPerCycle: 1,
firstCycleJoin: 10,
maxPercentOfDelta: 40,
minScaleReqsNeeded: 5,
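The new flag caps how many problematic nodes may be removed in a single cycle. As a rough sketch of how it could be surfaced to consumers (the real StrictServerConfiguration type is not part of this diff, so the interface below is only an illustrative stand-in), the field sits alongside the other rotation limits and is read with the same fallback pattern used later in ModeSystemFuncs.ts:

// Illustrative only: a trimmed-down stand-in for the p2p section of the server config.
// The real StrictServerConfiguration is not shown in this diff and may differ.
interface P2PRotationConfig {
  maxRotatedPerCycle: number
  // Upper bound on problematic-node removals per cycle (set to 1 in the shipped config).
  maxProblematicNodeRemovalsPerCycle: number
}

// Mirrors the fallback used in getExpiredRemovedV2: because of the || operator,
// a missing (or zero) value still yields a budget of 1.
function problematicRemovalBudget(p2p: P2PRotationConfig): number {
  return p2p.maxProblematicNodeRemovalsPerCycle || 1
}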
11 changes: 11 additions & 0 deletions src/debug/debug.ts
@@ -6,6 +6,8 @@ import Trie from 'trie-prefix-tree'
import { isDebugModeMiddleware, isDebugModeMiddlewareMedium } from '../network/debugMiddleware'
import { nestedCountersInstance } from '../utils/nestedCounters'
import { logFlags } from '../logger'
import { ProblemNodeTracker } from '../p2p/ProblemNodeHandler'
import { currentCycle } from '../p2p/CycleCreator'
const tar = require('tar-fs')
const fs = require('fs')

@@ -133,6 +135,15 @@ class Debug {
}
return res.json({ success: true })
})
this.network.registerExternalGet('debug_problemNodeTrackerDump', isDebugModeMiddleware, (req, res) => {
try {
const tracker = ProblemNodeTracker.getInstance()
const dump = tracker.getDump(currentCycle)
return res.json({ success: true, data: dump })
} catch (e) {
return res.json({ success: false, error: e.message })
}
})
}
}

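The new route depends on ProblemNodeTracker from src/p2p/ProblemNodeHandler.ts, which is not included in this diff. Based only on the two calls visible above (getInstance and getDump with the current cycle), a hypothetical minimal shape would look like the following; the entry fields are assumptions, not the real implementation:

// Hypothetical sketch inferred from the route above; the real ProblemNodeTracker may differ.
type ProblemNodeEntry = { firstSeenCycle: number; reason: string } // assumed fields

class ProblemNodeTracker {
  private static instance: ProblemNodeTracker
  private problemNodes = new Map<string, ProblemNodeEntry>()

  // Singleton accessor used by the debug route.
  static getInstance(): ProblemNodeTracker {
    if (!ProblemNodeTracker.instance) ProblemNodeTracker.instance = new ProblemNodeTracker()
    return ProblemNodeTracker.instance
  }

  // Serializable snapshot returned by debug_problemNodeTrackerDump.
  getDump(currentCycle: number): { cycle: number; nodes: Record<string, ProblemNodeEntry> } {
    return { cycle: currentCycle, nodes: Object.fromEntries(this.problemNodes) }
  }
}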
171 changes: 71 additions & 100 deletions src/p2p/ModeSystemFuncs.ts
@@ -10,6 +10,7 @@ import * as CycleCreator from './CycleCreator'
import * as CycleChain from './CycleChain'
import { logFlags } from '../logger'
import { Utils } from '@shardus/types'
import { getProblematicNodes } from './ProblemNodeHandler'

interface ToAcceptResult {
add: number
Expand Down Expand Up @@ -239,147 +240,117 @@ export function calculateToAcceptV2(prevRecord: P2P.CycleCreatorTypes.CycleRecor
return { add, remove }
}

const getApoptosizedNodes = (txs: P2P.RotationTypes.Txs & P2P.ApoptosisTypes.Txs): string[] => {
const apoptosizedNodesList = []
for (const request of txs.apoptosis) {
const node = NodeList.nodes.get(request.id)
if (node) {
apoptosizedNodesList.push(node.id)
}
}
return apoptosizedNodesList
}

// need to think about and maybe ask Omar about using prev record for determining mode, could use next record

/** Returns the number of expired nodes and the list of removed nodes using calculateToAcceptV2 */
/** Returns the number of problematic and expired nodes, plus the list of nodes removed this cycle,
 * using calculateToAcceptV2. The removed list contains both problematic and expired nodes.
 */
export function getExpiredRemovedV2(
prevRecord: P2P.CycleCreatorTypes.CycleRecord,
lastLoggedCycle: number,
txs: P2P.RotationTypes.Txs & P2P.ApoptosisTypes.Txs,
info: (...msg: string[]) => void
): { expired: number; removed: string[] } {
const start = prevRecord.start
let expired = 0
const removed = []
): { problematic: number; expired: number; removed: string[] } {
// clear state from last run
NodeList.potentiallyRemoved.clear()

// Don't expire/remove any if nodeExpiryAge is negative
if (config.p2p.nodeExpiryAge < 0) return { expired, removed }
if (config.p2p.nodeExpiryAge < 0) return { problematic: 0, expired: 0, removed: [] }

const active = NodeList.activeByIdOrder.length

const start = prevRecord.start
let expireTimestamp = start - config.p2p.nodeExpiryAge
if (expireTimestamp < 0) expireTimestamp = 0

// initialize the max amount to remove to our config value
// let maxRemove = config.p2p.maxRotatedPerCycle //TODO check if this is needed

// calculate the target number of nodes
const { add, remove } = calculateToAcceptV2(prevRecord)
nestedCountersInstance.countEvent(
'p2p',
`results of getExpiredRemovedV2.calculateToAcceptV2: add: ${add}, remove: ${remove}`
)
// initialize `scaleDownRemove` to at most any "excess" nodes more than
// desired. it can't be less than zero.
const maxRemove = remove

//only let the scale factor impart a partial influence based on scaleInfluenceForShrink
// const scaledAmountToShrink = getScaledAmountToShrink() //TODO check if this is needed

//limit the scale down by scaledAmountToShrink
// if (scaleDownRemove > scaledAmountToShrink) {
// scaleDownRemove = scaledAmountToShrink
// }

//maxActiveNodesToRemove is a percent of the active nodes that is set as a 0-1 value in maxShrinkMultiplier
//this is to prevent the network from shrinking too fast
//make sure the value is at least 1
// const maxActiveNodesToRemove = Math.max(Math.floor(config.p2p.maxShrinkMultiplier * active), 1)
// get list of nodes that have been requested to be removed
const apoptosizedNodesList = getApoptosizedNodes(txs)
const numApoptosizedRemovals = apoptosizedNodesList.length

// Get the set of problematic nodes
const problematicWithApoptosizedNodes = getProblematicNodes(prevRecord)
// filter out apoptosized nodes from the problematic nodes
const problematicNodes = problematicWithApoptosizedNodes.filter(id => !apoptosizedNodesList.includes(id))
const numProblematicRemovals = Math.min(
problematicNodes.length,
config.p2p.maxProblematicNodeRemovalsPerCycle || 1,
)

// get list of expired nodes
const expirationTimeThreshold = Math.max(start - config.p2p.nodeExpiryAge, 0)
// expired, non-apoptosized, non-syncing nodes
const expiredNodes = NodeList.byJoinOrder
  .filter(
    node =>
      node.activeTimestamp <= expirationTimeThreshold &&
      node.status !== 'syncing' &&
      !apoptosizedNodesList.includes(node.id)
  )
  .map(node => node.id)
const numExpiredNodes = expiredNodes.length

// we can remove `remove` nodes, but we *must* remove the number of apoptosized nodes,
// as well as the number of problematic nodes (determined by config.p2p.maxProblematicNodeRemovalsPerCycle, if any)
// the remainder is the number of expired nodes we can remove this cycle
const numExpiredRemovals = Math.max(0, remove - numApoptosizedRemovals - numProblematicRemovals)

const cycle = CycleChain.newest.counter
if (cycle > lastLoggedCycle && maxRemove > 0) {

if (cycle > lastLoggedCycle && remove > 0) {
lastLoggedCycle = cycle
info(
'scale down dump:' +
Utils.safeStringify({
cycle,
scaleFactor: CycleCreator.scaleFactor,
// scaleDownRemove,
// maxActiveNodesToRemove,
desired: prevRecord.desired,
active,
// scaledAmountToShrink,
maxRemove,
expired,
maxRemove: remove,
expired: numExpiredNodes,
})
)
}

//TODO not sure we still need the following block anymore
nestedCountersInstance.countEvent(
'p2p',
`results of getExpiredRemovedV2: numApoptosizedRemovals: ${numApoptosizedRemovals}, numProblematicRemovals: ${numProblematicRemovals}, numExpiredRemovals: ${numExpiredRemovals}, removed: ${remove}`
)

// Allows the network to scale down even if node rotation is turned off
// if (maxRemove < 1) {
// maxRemove = scaleDownRemove
// } else {
// //else pick the higher of the two
// maxRemove = Math.max(maxRemove, scaleDownRemove)
// }
// array that holds the ids of all nodes selected for removal:
// problematic nodes first, then the expired nodes that fit in the remaining budget
const toRemoveUnsorted = problematicNodes
.slice(0, numProblematicRemovals)
.concat(
expiredNodes.slice(0, numExpiredRemovals)
)

// never remove more nodes than the difference between active and desired
// if (maxRemove > active - desired) maxRemove = active - desired // [TODO] - this is handled inside calculateToAcceptV2
// filter byJoinOrder so the final removal list preserves join order (oldest first)
const toRemove = NodeList.byJoinOrder
.filter(node => toRemoveUnsorted.includes(node.id))

// final clamp of max remove, but only if it is more than amountToShrink
// to avoid messing up the calculation above this next part can only make maxRemove smaller.
// maxActiveNodesToRemove is a percent of the active nodes that is set as a 0-1 value in maxShrinkMultiplier
// if (maxRemove > config.p2p.amountToShrink && maxRemove > maxActiveNodesToRemove) {
// yes, this max could be baked in earlier, but I like it here for clarity
// maxRemove = Math.max(config.p2p.amountToShrink, maxActiveNodesToRemove)
// }

//TODO end of block

nestedCountersInstance.countEvent(
'p2p',
`results of getExpiredRemovedV2: scaleDownRemove: maxRemove: ${maxRemove}`
)
// get list of nodes that have been requested to be removed
const apoptosizedNodesList = []
for (const request of txs.apoptosis) {
const node = NodeList.nodes.get(request.id)
if (node) {
apoptosizedNodesList.push(node.id)
}
}

// Oldest node has index 0
for (const node of NodeList.byJoinOrder) {
// don't count syncing nodes in our expired count
if (node.status === 'syncing') continue

// once we've hit the first node that's not expired, stop counting
// updated to use activeTimestamp as this is when the node has gone active for us
if (node.activeTimestamp > expireTimestamp) break

// otherwise, count this node as expired
expired++

// Add it to removed if it isn't full
if (config.p2p.uniqueRemovedIds) {
// Limit the number of nodes that can be removed by removed + apoptosized
if (removed.length + apoptosizedNodesList.length < maxRemove) {
NodeList.potentiallyRemoved.add(node.id)
if (!apoptosizedNodesList.includes(node.id)) {
insertSorted(removed, node.id)
}
} else break
} else {
if (removed.length < maxRemove) {
NodeList.potentiallyRemoved.add(node.id)
insertSorted(removed, node.id)
}
}
const removed: string[] = []
// Process nodes for removal
for (const node of toRemove) {
nestedCountersInstance.countEvent(
'p2p',
`getExpiredRemovedV2: adding node to removed: ${node.id}`
)
NodeList.potentiallyRemoved.add(node.id)
insertSorted(removed, node.id)
}

return { expired, removed }
}

/** Returns a linearly interpolated value between `amountToShrink` and the same
* multiplied by a `scaleFactor`. The result depends on the
* `scaleInfluenceForShrink` */
function getScaledAmountToShrink(): number {
const nonScaledAmount = config.p2p.amountToShrink
const scaledAmount = config.p2p.amountToShrink * CycleCreator.scaleFactor
const scaleInfluence = config.p2p.scaleInfluenceForShrink
return Math.floor(lerp(nonScaledAmount, scaledAmount, scaleInfluence))
}
return { problematic: problematicNodes.length, expired: numExpiredNodes, removed }
}
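The heart of the refactor is how the per-cycle removal budget from calculateToAcceptV2 is split between apoptosized, problematic, and expired nodes. A small worked example under assumed numbers (the values below are illustrative, not taken from a real cycle):

// Suppose calculateToAcceptV2 allows removing 5 nodes this cycle,
// 1 node has requested apoptosis, 3 nodes are flagged problematic,
// and maxProblematicNodeRemovalsPerCycle is left at its default of 1.
const remove = 5
const numApoptosizedRemovals = 1
const numProblematicRemovals = Math.min(3, 1) // capped by config -> 1
const numExpiredRemovals = Math.max(0, remove - numApoptosizedRemovals - numProblematicRemovals) // 3
// removed therefore contains 1 problematic node plus up to 3 expired nodes;
// the apoptosized node leaves via its own apoptosis record rather than this list.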