diff --git a/.golangci.yml b/.golangci.yml
index d41929cf76..bf117c6e36 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -20,13 +20,12 @@ linters:
     - misspell
     # - staticcheck
     - unconvert
-    - typecheck
+    # - typecheck
     # - unused
-    - staticcheck
-    - bidichk
-    - durationcheck
-    - exportloopref
-    - whitespace
+    # - bidichk
+    # - durationcheck
+    # - exportloopref
+    # - whitespace
     # - gosec
     # - errcheck
diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go
index 24c10fc680..10bea4f87f 100644
--- a/cmd/devp2p/internal/ethtest/transaction.go
+++ b/cmd/devp2p/internal/ethtest/transaction.go
@@ -29,7 +29,7 @@ import (
 	"github.com/celo-org/celo-blockchain/params"
 )
 
-//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
+// var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
 var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 
 func (s *Suite) sendSuccessfulTxs(t *utesting.T, isCelo67 bool) error {
diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go
index b5903b7f5f..c82021ef44 100644
--- a/cmd/ethkey/utils.go
+++ b/cmd/ethkey/utils.go
@@ -50,7 +50,8 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
 // that can be safely used to calculate a signature from.
 //
 // The hash is calulcated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+//	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
 func signHash(data []byte) []byte {
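
The comment-only hunks in this change match the doc-comment reflow that gofmt performs from Go 1.19 onward: indented continuation text is separated from the prose by a blank "//" line and rendered as a preformatted block, list items get an extra indent, and a line starting with "# " becomes a heading. A rough illustration of the target layout, using a hypothetical function that is not part of the patch:

	package example

	// Example shows the Go 1.19+ doc-comment layout used throughout this change.
	//
	// # Headings start with "# "
	//
	// List items are indented and introduced with a dash:
	//   - first item
	//   - second item
	//
	// Preformatted text follows a blank "//" line and is indented:
	//
	//	keccak256("\x19Ethereum Signed Message:\n"${message length}${message})
	func Example() {}
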
diff --git a/consensus/istanbul/announce/val_enode_db.go b/consensus/istanbul/announce/val_enode_db.go
index a43899c24c..bae0c9aad2 100644
--- a/consensus/istanbul/announce/val_enode_db.go
+++ b/consensus/istanbul/announce/val_enode_db.go
@@ -388,8 +388,9 @@ func (vet *ValidatorEnodeDB) UpdateQueryEnodeStats(valEnodeEntries []*istanbul.A
 // upsert will update or insert a validator enode entry given that the existing entry
 // is older (determined by the version) than the new one
 // TODO - In addition to modifying the val_enode_db, this function also will disconnect
-// and/or connect the corresponding validator connenctions. The validator connections
-// should be managed be a separate thread (see https://github.com/celo-org/celo-blockchain/issues/607)
+//
+//	and/or connect the corresponding validator connenctions. The validator connections
+//	should be managed be a separate thread (see https://github.com/celo-org/celo-blockchain/issues/607)
 func (vet *ValidatorEnodeDB) upsert(valEnodeEntries []*istanbul.AddressEntry,
 	onNewEntry func(batch *leveldb.Batch, entry db.GenericEntry) error,
 	onUpdatedEntry func(batch *leveldb.Batch, existingEntry db.GenericEntry, newEntry db.GenericEntry) error) error {
diff --git a/consensus/istanbul/config.go b/consensus/istanbul/config.go
index c521f3ef84..5bff564572 100644
--- a/consensus/istanbul/config.go
+++ b/consensus/istanbul/config.go
@@ -102,7 +102,7 @@ var DefaultConfig = &Config{
 	LoadTestCSVFile: "", // disable by default
 }
 
-//ApplyParamsChainConfigToConfig applies the istanbul config values from params.chainConfig to the istanbul.Config config
+// ApplyParamsChainConfigToConfig applies the istanbul config values from params.chainConfig to the istanbul.Config config
 func ApplyParamsChainConfigToConfig(chainConfig *params.ChainConfig, config *Config) error {
 	if chainConfig.Istanbul.Epoch != 0 {
 		if chainConfig.Istanbul.Epoch < MinEpochSize {
diff --git a/consensus/istanbul/core/backlog.go b/consensus/istanbul/core/backlog.go
index b8b11a71fb..4122e79cfc 100644
--- a/consensus/istanbul/core/backlog.go
+++ b/consensus/istanbul/core/backlog.go
@@ -93,9 +93,9 @@ func (c *core) checkMessage(msgCode uint64, msgView *istanbul.View) error {
 
 // MsgBacklog represent a backlog of future messages
 // It works by:
-// - allowing storing messages with "store()"
-// - call eventListener when a backlog message becomes "present"
-// - updates its notion of time/state with updateState()
+//   - allowing storing messages with "store()"
+//   - call eventListener when a backlog message becomes "present"
+//   - updates its notion of time/state with updateState()
 type MsgBacklog interface {
 	// store atttemps to store the message in the backlog
 	// it might not do so, if the message is too far in the future
diff --git a/consensus/istanbul/core/types.go b/consensus/istanbul/core/types.go
index 63390818f4..244a1dbc62 100644
--- a/consensus/istanbul/core/types.go
+++ b/consensus/istanbul/core/types.go
@@ -77,9 +77,10 @@ func (s State) String() string {
 }
 
 // Cmp compares s and y and returns:
-//   -1 if s is the previous state of y
-//    0 if s and y are the same state
-//   +1 if s is the next state of y
+//
+//	-1 if s is the previous state of y
+//	 0 if s and y are the same state
+//	+1 if s is the next state of y
 func (s State) Cmp(y State) int {
 	if uint64(s) < uint64(y) {
 		return -1
diff --git a/consensus/istanbul/proxy/proxy_set.go b/consensus/istanbul/proxy/proxy_set.go
index c45afa4209..374f321d63 100644
--- a/consensus/istanbul/proxy/proxy_set.go
+++ b/consensus/istanbul/proxy/proxy_set.go
@@ -29,7 +29,8 @@ import (
 // proxySet defines the set of proxies that the validator is aware of and
 // validator/proxy assignments.
 // WARNING: None of this object's functions are threadsafe, so it's
-// the user's responsibility to ensure that.
+//
+//	the user's responsibility to ensure that.
 type proxySet struct {
 	proxiesByID    map[enode.ID]*Proxy // all proxies known by this node, whether or not they are peered
 	valAssignments *valAssignments     // the mappings of proxy<->remote validators
diff --git a/consensus/istanbul/proxy/validator_assignment.go b/consensus/istanbul/proxy/validator_assignment.go
index 17998620db..fc33428deb 100644
--- a/consensus/istanbul/proxy/validator_assignment.go
+++ b/consensus/istanbul/proxy/validator_assignment.go
@@ -30,7 +30,8 @@ import (
 // If a validator is assigned to a nil proxy, then that means that it's
 // not assigned yet.
 // WARNING: None of this object's functions are threadsafe, so it's
-// the user's responsibility to ensure that.
+//
+//	the user's responsibility to ensure that.
 type valAssignments struct {
 	valToProxy  map[common.Address]*enode.ID             // map of validator address -> proxy assignment ID
 	proxyToVals map[enode.ID]map[common.Address]struct{} // map of proxy ID to set of validator addresses
@@ -126,7 +127,8 @@ func (h hasher) Sum64(data []byte) uint64 {
 // Validator <-> proxy pairings are recalculated every time a proxy or validator
 // is added/removed
 // WARNING: None of this object's functions are threadsafe, so it's
-// the user's responsibility to ensure that.
+//
+//	the user's responsibility to ensure that.
 type consistentHashingPolicy struct {
 	c      *consistent.Consistent // used for consistent hashing
 	logger log.Logger
diff --git a/consensus/istanbul/types.go b/consensus/istanbul/types.go
index c7e6bf8c98..787d45c13c 100644
--- a/consensus/istanbul/types.go
+++ b/consensus/istanbul/types.go
@@ -113,9 +113,10 @@ func (v *View) String() string {
 }
 
 // Cmp compares v and y and returns:
-//   -1 if v < y
-//    0 if v == y
-//   +1 if v > y
+//
+//	-1 if v < y
+//	 0 if v == y
+//	+1 if v > y
 func (v *View) Cmp(y *View) int {
 	if v.Sequence.Cmp(y.Sequence) != 0 {
 		return v.Sequence.Cmp(y.Sequence)
diff --git a/consensus/istanbul/uptime/autofix_test.go b/consensus/istanbul/uptime/autofix_test.go
index fdc65fba80..f8db51decd 100644
--- a/consensus/istanbul/uptime/autofix_test.go
+++ b/consensus/istanbul/uptime/autofix_test.go
@@ -11,11 +11,9 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-func copy(b *builder) *builder {
+func copyBuilder(b *builder) *builder {
 	h := make([]*types.Header, len(b.headersAdded))
-	for i, hd := range b.headersAdded {
-		h[i] = hd
-	}
+	copy(h, b.headersAdded)
 	return &builder{
 		epoch:     b.epoch,
 		epochSize: b.epochSize,
@@ -72,7 +70,7 @@ func TestAddOnFirstOfEpoch(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	provider := &headers{t: t, epochSize: 100, reqs: []headersReq{}}
 	af := NewAutoFixBuilder(b, provider)
@@ -107,7 +105,7 @@ func TestAddManyFirstOfEpoch(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	last := header(105)
 	providerResult := []*types.Header{header(101), header(102), header(103), header(104), header(105)}
@@ -137,7 +135,7 @@ func TestContinueSequentialAdd(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	provider := &headers{t: t, epochSize: 100, reqs: []headersReq{}}
 	af := NewAutoFixBuilder(b, provider)
@@ -169,7 +167,7 @@ func TestSequentialAddFork(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	last := header(103)
 	providerResult := []*types.Header{header(101), header(102), last}
@@ -204,7 +202,7 @@ func TestRewind(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	last := header(102)
 	providerResult := []*types.Header{header(101), header(102)}
@@ -238,7 +236,7 @@ func TestDoNothing(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	provider := &headers{t: t, epochSize: 100, reqs: []headersReq{}}
 	af := NewAutoFixBuilder(b, provider)
@@ -268,7 +266,7 @@ func TestSameHeightRebuild(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b) // copy of builder, for the compute test
+	b2 := copyBuilder(b) // copy of builder, for the compute test
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	providerResult := []*types.Header{header(101), header(102), last}
 	provider := &headers{t: t, epochSize: 100, reqs: []headersReq{
@@ -299,7 +297,7 @@ func TestAdvance(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b)
+	b2 := copyBuilder(b)
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	last := header(106)
 	providerResult := []*types.Header{pivot, header(104), header(105), header(106)}
@@ -334,7 +332,7 @@ func TestAdvanceFork(t *testing.T) {
 		computeV: computeV,
 		computeE: computeE,
 	}
-	b2 := copy(b)
+	b2 := copyBuilder(b)
 	assert.True(t, istanbul.IsFirstBlockOfEpoch(101, 100))
 	last := header(106)
 	providerResult1 := []*types.Header{forkPivot, header(104), header(105), header(106)}
diff --git a/consensus/istanbul/uptime/monitor.go b/consensus/istanbul/uptime/monitor.go
index f599c4a40b..ab63c6d0d1 100644
--- a/consensus/istanbul/uptime/monitor.go
+++ b/consensus/istanbul/uptime/monitor.go
@@ -22,10 +22,7 @@ type Uptime struct {
 
 func (u *Uptime) Copy() *Uptime {
 	entriesCopy := make([]UptimeEntry, len(u.Entries))
-	for i := 0; i < len(u.Entries); i++ {
-		// UptimeEntry is not a pointer, copy.
-		entriesCopy[i] = u.Entries[i]
-	}
+	copy(entriesCopy, u.Entries)
 	return &Uptime{
 		// No need to copy the header
 		LatestHeader: u.LatestHeader,
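
Both loop removals above (copyBuilder in autofix_test.go and Uptime.Copy here) rely on the built-in copy, which copies min(len(dst), len(src)) elements; since both call sites pre-allocate the destination to the source length, behaviour is unchanged. Renaming the test helper from copy to copyBuilder also stops it from shadowing the predeclared copy within that package. A minimal standalone sketch of the pattern (not code from this repository):

	package main

	import "fmt"

	func main() {
		src := []int{1, 2, 3, 4}

		// Pre-allocate the destination to the source length, then copy.
		dst := make([]int, len(src))
		n := copy(dst, src) // copy returns min(len(dst), len(src)) = 4 here

		// The copy has its own backing array, so mutating it leaves src untouched.
		dst[0] = 99
		fmt.Println(n, src, dst) // 4 [1 2 3 4] [99 2 3 4]
	}
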
diff --git a/contracts/config/version.go b/contracts/config/version.go
index a02f741502..b2222d9e3c 100644
--- a/contracts/config/version.go
+++ b/contracts/config/version.go
@@ -10,10 +10,9 @@ type VersionInfo struct {
 
 // Cmp compares x and y and returns:
 //
-//   -1 if x < y
-//    0 if x == y
-//   +1 if x > y
-//
+//	-1 if x < y
+//	 0 if x == y
+//	+1 if x > y
 func cmp(x uint64, y uint64) int {
 	if x < y {
 		return -1
diff --git a/core/rawdb/doc.go b/core/rawdb/doc.go
index 8da8908671..2e15ef61c0 100644
--- a/core/rawdb/doc.go
+++ b/core/rawdb/doc.go
@@ -33,6 +33,5 @@ modified the items added to the freezer must be considered final (I.E, no
 chance of a re-org) currently that is ensured by only freezing blocks at least
 90000 behind the head block. This 90000 threshold is referred to as the
 'FullImmutabilityThreshold'.
-
 */
 package rawdb
diff --git a/core/state_transition.go b/core/state_transition.go
index b15ec065f5..5c37c0daa8 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -286,6 +286,7 @@ func (st *StateTransition) payFees(espresso bool) error {
 // For native token(CELO) as feeCurrency:
 //   - Pre-Espresso: it ensures balance >= GasPrice * gas + gatewayFee (1)
 //   - Post-Espresso: it ensures balance >= GasFeeCap * gas + value + gatewayFee (2)
+//
 // For non-native tokens(cUSD, cEUR, ...) as feeCurrency:
 //   - Pre-Espresso: it ensures balance > GasPrice * gas + gatewayFee (3)
 //   - Post-Espresso: it ensures balance >= GasFeeCap * gas + gatewayFee (4)
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 67cd62d762..9a3003d6fc 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -1719,6 +1719,7 @@ func (pool *TxPool) demoteUnexecutables() {
 // For native token(CELO) as feeCurrency:
 //   - Pre-Espresso: it ensures balance >= GasPrice * gas + value + gatewayFee (1)
 //   - Post-Espresso: it ensures balance >= GasFeeCap * gas + value + gatewayFee (2)
+//
 // For non-native tokens(cUSD, cEUR, ...) as feeCurrency:
 //   - Pre-Espresso: it ensures balance > GasPrice * gas + gatewayFee (3)
 //   - Post-Espresso: it ensures balance >= GasFeeCap * gas + gatewayFee (4)
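
The two comment blocks above state the balance requirements as plain inequalities per fork and fee currency; the added blank "//" line only keeps the two lists separate in godoc. As a worked illustration of the post-Espresso native-token case, balance >= GasFeeCap * gas + value + gatewayFee, here is a self-contained sketch (hypothetical helper, not the payFees or demoteUnexecutables code):

	package main

	import (
		"fmt"
		"math/big"
	)

	// requiredNativeBalance computes GasFeeCap*gas + value + gatewayFee, the
	// minimum native-token balance described by the post-Espresso rule (2).
	func requiredNativeBalance(gasFeeCap *big.Int, gas uint64, value, gatewayFee *big.Int) *big.Int {
		required := new(big.Int).Mul(gasFeeCap, new(big.Int).SetUint64(gas))
		required.Add(required, value)
		return required.Add(required, gatewayFee)
	}

	func main() {
		balance := big.NewInt(2_000_000)
		need := requiredNativeBalance(big.NewInt(10), 100_000, big.NewInt(500_000), big.NewInt(0))
		fmt.Println(need, balance.Cmp(need) >= 0) // 1500000 true
	}
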
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 615c2cbd02..2a2ae348f4 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -967,7 +967,8 @@ func TestTransactionGapFilling(t *testing.T) {
 
 // When we change TestChangeConfig to enable Donut this test will need:
 // (a) to set pool.donut = false at its start (so we can add unprotected transactions)
 // (b) different functions to generate protected vs unprotected transactions, since we will
-// need to update transaction() and the others to use replay protection
+//
+//	need to update transaction() and the others to use replay protection
 func TestPoolReAcceptingUnprotectedTxsFromEspresso(t *testing.T) {
 	t.Parallel()
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index eb3a3f0af9..e06edb0415 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -229,7 +229,7 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
 			return
 		}
 		if common.Bytes2Hex(res) != test.Expected {
-			bench.Error(fmt.Sprintf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)))
+			bench.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res))
 			return
 		}
 	})
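
The contracts_test.go hunk swaps bench.Error(fmt.Sprintf(...)) for bench.Errorf(...), which formats in place and drops the extra fmt call at the call site; both mark the benchmark as failed. A minimal sketch of the pattern (hypothetical benchmark, not the precompile test itself):

	package example

	import "testing"

	func BenchmarkHexCheck(b *testing.B) {
		expected := "deadbeef"
		for i := 0; i < b.N; i++ {
			res := "deadbeef" // stand-in for the output being checked
			if res != expected {
				// Errorf is equivalent to b.Error(fmt.Sprintf(format, args...)).
				b.Errorf("Expected %v, got %v", expected, res)
				return
			}
		}
	}
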
diff --git a/eth/downloader/doc.go b/eth/downloader/doc.go
index f7baca5112..1629022cd8 100644
--- a/eth/downloader/doc.go
+++ b/eth/downloader/doc.go
@@ -3,7 +3,7 @@ Package downloader handles downloading data from other nodes for sync.
 
 Sync refers to the process of catching up with other nodes, once caught up
 nodes use a different process to maintain their syncronisation.
 
-There are a few different modes for syncing
+# There are a few different modes for syncing
 
 Full: Get all the blocks and apply them all to build the chain state
@@ -41,7 +41,7 @@ Lightest: Like light but downloads only one header per epoch, which on mainnet
 means one header out of every 17280 headers. This is particularly fast only
 takes 20 seconds or so to get synced.
 
-Sync process detail
+# Sync process detail
 
 Syncing is initiated with one peer (see eth.loop), the peer selected to sync
 with is the one with the highest total difficulty of all peers (see
@@ -107,24 +107,23 @@ fetchReceipts
 
 And one to process the headers.
 (processHeaders)
 
-If fast sync {
-	start a routine to process the fast sync content (processFastSyncContent)
-}
-If full syncing {
+	If fast sync {
+	start a routine to process the fast sync content (processFastSyncContent)
+	}
 
-	start a routine to process the full sync content (processFullSyncContent)
-}
+If full syncing {
+	start a routine to process the full sync content (processFullSyncContent)
+	}
 
 These goroutines form a pipeline where the downloaded data flows as follows.
 
-     -> fetchBodies -> processFullSyncContent
-    /                 \
-fetchHeaders -> processHeaders   \
-    \                  \
-     -> fetchReceipts --> processFastSyncContent
-
+	   -> fetchBodies -> processFullSyncContent
+	  /                 \
+	fetchHeaders -> processHeaders   \
+	  \                  \
+	   -> fetchReceipts --> processFastSyncContent
 
 
 fetchHeaders
@@ -136,14 +135,14 @@ the chain of the main peer they are syncing against.
 
 Whether fetching skeleton headers or not requests for headers are done in
 batches of up to 192 (MaxHeaderFetch) headers.
 
-If lightest sync {
-	fetch just epoch headers till current epoch then fetch all subsequent headers. (no skeleton)
-} else {
-	fetch headers using the skeleton approach, until no more skeleton headers
-	are returned then switch to requesting all subsequent headers from the
-	peer.
-}
+	If lightest sync {
+	fetch just epoch headers till current epoch then fetch all subsequent headers. (no skeleton)
+	} else {
+	fetch headers using the skeleton approach, until no more skeleton headers
+	are returned then switch to requesting all subsequent headers from the
+	peer.
+	}
 
 Wait for headers to be received.
@@ -159,12 +158,13 @@ processHeaders
 
 Waits to receive headers from fetchHeaders inserts the received headers into
 the header chain.
 
-If full sync {
-	request blocks for inserted headers. (fetchBodies)
-}
-If fast sync {
-	request blocks and receipts for inserted headers. (fetchBodies & fetchReceipts)
-}
+	If full sync {
+	request blocks for inserted headers. (fetchBodies)
+	}
+
+	If fast sync {
+	request blocks and receipts for inserted headers. (fetchBodies & fetchReceipts)
+	}
 
 processFastSyncContent
@@ -179,18 +179,18 @@ Results before the pivot are inserted with
 BlockChain.InsertReceiptChain (which inserts receipts, because in fast sync
 most blocks are not processed) and those after the pivot
 
-If the pivot has completed syncing {
-	Inserts the results after the pivot with, BlockChain.InsertChain and exits.
-} else {
-	Start the process again prepending the results after the pivot point to the
-	newly fetched results. (Note that if the pivot point is subsequently
-	updated those results will be processed as fast sync results and inserted
-	via BlockChain.InsertReceiptChain, but there seems to be a problem with our
-	current implementation that means that the pivot would have to get 2 days
-	old before it would be updated, so actually it looks like the list of
-	result s will grow a lot during this time could be an OOM consideration)
-}
-
+	If the pivot has completed syncing {
+	Inserts the results after the pivot with, BlockChain.InsertChain and exits.
+	} else {
+
+	Start the process again prepending the results after the pivot point to the
+	newly fetched results. (Note that if the pivot point is subsequently
+	updated those results will be processed as fast sync results and inserted
+	via BlockChain.InsertReceiptChain, but there seems to be a problem with our
+	current implementation that means that the pivot would have to get 2 days
+	old before it would be updated, so actually it looks like the list of
+	result s will grow a lot during this time could be an OOM consideration)
+	}
 
 fetchBodies
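
The package comment above describes fetchHeaders, processHeaders, fetchBodies/fetchReceipts and the content processors as goroutines joined into a pipeline. The channel pattern being described looks roughly like the toy sketch below; it is illustrative only and is not the downloader's actual code:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		headers := make(chan int, 8) // stand-in for fetched header batches
		bodies := make(chan int, 8)  // stand-in for scheduled body downloads

		var wg sync.WaitGroup
		wg.Add(3)

		go func() { // "fetchHeaders": produce headers
			defer wg.Done()
			defer close(headers)
			for n := 1; n <= 5; n++ {
				headers <- n
			}
		}()

		go func() { // "processHeaders": insert headers, schedule body fetches
			defer wg.Done()
			defer close(bodies)
			for h := range headers {
				bodies <- h
			}
		}()

		go func() { // "processFullSyncContent": import completed blocks
			defer wg.Done()
			for b := range bodies {
				fmt.Println("imported block", b)
			}
		}()

		wg.Wait()
	}
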
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 0fa39e47f0..cc52f331c4 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1460,22 +1460,22 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 // various callbacks to handle the slight differences between processing them.
 //
 // The instrumentation parameters:
-// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
-// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
-// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
-// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
-// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
-// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
-// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
-// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
-// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
-// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
-// - fetch: network callback to actually send a particular download request to a physical remote peer
-// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
-// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
-// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
-// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
-// - kind: textual label of the type being downloaded to display in log messages
+//   - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
+//   - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
+//   - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
+//   - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
+//   - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
+//   - pending: task callback for the number of requests still needing download (detect completion/non-completability)
+//   - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
+//   - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
+//   - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
+//   - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
+//   - fetch: network callback to actually send a particular download request to a physical remote peer
+//   - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
+//   - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
+//   - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
+//   - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
+//   - kind: textual label of the type being downloaded to display in log messages
 func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
 	expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
 	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
diff --git a/les/peer.go b/les/peer.go
index 68623377eb..65f6a2cb71 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -1014,7 +1014,7 @@ func (p *clientPeer) SendEtherbaseRLP(reqID uint64, etherbase common.Address) *r
 	return &reply{p.rw, EtherbaseMsg, reqID, data}
 }
 
-//ReplyGatewayFee creates reply with gateway fee that was requested
+// ReplyGatewayFee creates reply with gateway fee that was requested
 func (p *clientPeer) ReplyGatewayFee(reqID uint64, resp GatewayFeeInformation) *reply {
 	data, _ := rlp.EncodeToBytes(resp)
 	return &reply{p.rw, GatewayFeeMsg, reqID, data}
diff --git a/les/server.go b/les/server.go
index 02219526c5..ed8cdda957 100644
--- a/les/server.go
+++ b/les/server.go
@@ -313,7 +313,7 @@ func (s *LesServer) capacityManagement() {
 	}
 }
 
-//This sends messages to light client peers whenever this light server updates gateway fee.
+// This sends messages to light client peers whenever this light server updates gateway fee.
 func (s *LesServer) BroadcastGatewayFeeInfo() error {
 	lightClientPeerNodes := s.peers.allPeers()
 	if s.handler.gatewayFee.Cmp(common.Big0) < 0 {
diff --git a/params/version.go b/params/version.go
index d6b6659091..48d075c540 100644
--- a/params/version.go
+++ b/params/version.go
@@ -47,7 +47,8 @@ var VersionWithMeta = func() string {
 
 // ArchiveVersion holds the textual version string used for Geth archives.
 // e.g. "1.8.11-dea1ce05" for stable releases, or
-// "1.8.13-unstable-21c059b6" for unstable releases
+//
+//	"1.8.13-unstable-21c059b6" for unstable releases
 func ArchiveVersion(gitCommit string) string {
 	vsn := Version
 	if VersionMeta != "stable" {
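
The ArchiveVersion comment documents the archive naming shape: the bare version plus an 8-character commit for stable releases, with an extra "-unstable" marker otherwise. A rough standalone sketch of that string construction (hypothetical helper, not params.ArchiveVersion itself):

	package main

	import "fmt"

	// archiveVersion builds "<version>[-<meta>]-<short commit>" as described above.
	func archiveVersion(version, meta, gitCommit string) string {
		vsn := version
		if meta != "stable" {
			vsn += "-" + meta
		}
		if len(gitCommit) >= 8 {
			vsn += "-" + gitCommit[:8]
		}
		return vsn
	}

	func main() {
		fmt.Println(archiveVersion("1.8.11", "stable", "dea1ce05"))   // 1.8.11-dea1ce05
		fmt.Println(archiveVersion("1.8.13", "unstable", "21c059b6")) // 1.8.13-unstable-21c059b6
	}
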
"1.8.11-dea1ce05" for stable releases, or -// "1.8.13-unstable-21c059b6" for unstable releases +// +// "1.8.13-unstable-21c059b6" for unstable releases func ArchiveVersion(gitCommit string) string { vsn := Version if VersionMeta != "stable" { diff --git a/signer/rules/rules_test.go b/signer/rules/rules_test.go index ec909fb62b..d838e025e0 100644 --- a/signer/rules/rules_test.go +++ b/signer/rules/rules_test.go @@ -244,7 +244,7 @@ func (d *dummyUI) OnApprovedTx(tx ethapi.SignTransactionResult) { func (d *dummyUI) OnSignerStartup(info core.StartupInfo) { } -//TestForwarding tests that the rule-engine correctly dispatches requests to the next caller +// TestForwarding tests that the rule-engine correctly dispatches requests to the next caller func TestForwarding(t *testing.T) { js := "" @@ -546,7 +546,7 @@ func (d *dontCallMe) OnApprovedTx(tx ethapi.SignTransactionResult) { d.t.Fatalf("Did not expect next-handler to be called") } -//TestContextIsCleared tests that the rule-engine does not retain variables over several requests. +// TestContextIsCleared tests that the rule-engine does not retain variables over several requests. // if it does, that would be bad since developers may rely on that to store data, // instead of using the disk-based data storage func TestContextIsCleared(t *testing.T) {