Commit b60038a

[ETCM-103] Minor renaming and cleanup

1 parent 3f9f261

6 files changed: +83 −72 lines changed

src/main/resources/application.conf

Lines changed: 9 additions & 10 deletions

@@ -261,17 +261,17 @@ mantis {
   blockchains {
     network = "etc"

-    etc { include "chains/etc-chain.conf"}
+    etc {include "chains/etc-chain.conf"}

-    eth { include "chains/eth-chain.conf"}
+    eth {include "chains/eth-chain.conf"}

-    mordor { include "chains/mordor-chain.conf"}
+    mordor {include "chains/mordor-chain.conf"}

-    ropsten { include "chains/ropsten-chain.conf"}
+    ropsten {include "chains/ropsten-chain.conf"}

-    test { include "chains/test-chain.conf"}
+    test {include "chains/test-chain.conf"}

-    testnet-internal { include "chains/testnet-internal-chain.conf"}
+    testnet-internal {include "chains/testnet-internal-chain.conf"}
   }

   sync {

@@ -397,17 +397,16 @@ mantis {
     # The current size of the ETC state trie is around 150M nodes, so 200M is set to leave some reserve.
     # If the number of elements inserted into the bloom filter were significantly higher than expected, the number
     # of false positives would rise, which would degrade the performance of state sync.
-    state-sync-bloomFilter-size = 200000000
-
+    state-sync-bloom-filter-size = 200000000

     # Max number of mpt nodes held in memory during state sync before saving them into the database.
     # 100k is around 60mb (each key-value pair takes around 600 bytes).
-    state-sync-persistBatch-size = 100000
+    state-sync-persist-batch-size = 100000

     # If the new pivot block received from the network is lower than the current fast sync pivot block, the retry
     # to choose a new pivot is scheduled after this time. The average block time in etc/eth is around 15s, so after
     # this time most network peers should have a new best block.
-    pivot-block-reSchedule-interval = 15.seconds
+    pivot-block-reschedule-interval = 15.seconds

     # If for most network peers the following condition holds:
     # (peer.bestKnownBlock - pivot-block-offset) - node.currentPivotBlock > max-pivot-age
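The renames above move the sync keys to consistent kebab-case, so the lookups in Config.scala (further down in this diff) must change in lockstep. As a minimal sketch of reading the renamed keys with Typesafe Config — the standalone object and println are illustrative only, and a mantis.sync section on the classpath is assumed:

import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

object SyncConfigReadSketch extends App {
  // ConfigFactory.load() resolves application.conf (and its includes) from the classpath.
  val syncConfig = ConfigFactory.load().getConfig("mantis.sync")

  // The keys must match the new kebab-case spelling exactly; the old
  // "state-sync-bloomFilter-size" would now fail with ConfigException.Missing.
  val bloomFilterSize = syncConfig.getInt("state-sync-bloom-filter-size")
  val persistBatchSize = syncConfig.getInt("state-sync-persist-batch-size")
  val reScheduleInterval = syncConfig.getDuration("pivot-block-reschedule-interval").toMillis.millis

  println(s"bloom=$bloomFilterSize batch=$persistBatchSize reschedule=$reScheduleInterval")
}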

src/main/scala/io/iohk/ethereum/blockchain/sync/FastSync.scala

Lines changed: 40 additions & 29 deletions

@@ -72,7 +72,7 @@ class FastSync(
   def startWithState(syncState: SyncState): Unit = {
     log.info(s"Starting with existing state and asking for new pivot block")
     val syncingHandler = new SyncingHandler(syncState)
-    syncingHandler.askForPivotBlockUpdate(NodeRestart)
+    syncingHandler.askForPivotBlockUpdate(SyncRestart)
   }

   def startFromScratch(): Unit = {

@@ -205,39 +205,48 @@ class FastSync(
     scheduler.scheduleOnce(syncConfig.pivotBlockReScheduleInterval, self, UpdatePivotBlock(updateReason))
   }

+  private def stalePivotAfterRestart(
+      newPivot: BlockHeader,
+      currentState: SyncState,
+      updateReason: PivotBlockUpdateReason
+  ): Boolean = {
+    newPivot.number == currentState.pivotBlock.number && updateReason.isSyncRestart
+  }
+
+  private def newPivotIsGoodEnough(
+      newPivot: BlockHeader,
+      currentState: SyncState,
+      updateReason: PivotBlockUpdateReason
+  ): Boolean = {
+    newPivot.number >= currentState.pivotBlock.number && !stalePivotAfterRestart(newPivot, currentState, updateReason)
+  }
+
   def waitingForPivotBlockUpdate(updateReason: PivotBlockUpdateReason): Receive = handleCommonMessages orElse {
-    case PivotBlockSelector.Result(pivotBlockHeader) =>
+    case PivotBlockSelector.Result(pivotBlockHeader)
+        if newPivotIsGoodEnough(pivotBlockHeader, syncState, updateReason) =>
       log.info(s"New pivot block with number ${pivotBlockHeader.number} received")
-      if (pivotBlockHeader.number >= syncState.pivotBlock.number) {
-        if (pivotBlockHeader.number == syncState.pivotBlock.number && updateReason.nodeRestart) {
-          // it can happen after quick node restart than pivot block has not changed in the network. To keep whole
-          // fast sync machinery running as expected we need to make sure that we will receive better pivot than current
-          log.info("Received stale pivot after restart, asking for new pivot")
-          reScheduleAskForNewPivot(updateReason)
-        } else {
-          updatePivotSyncState(updateReason, pivotBlockHeader)
-          syncState = syncState.copy(updatingPivotBlock = false)
-          context become this.receive
-          processSyncing()
-        }
-      } else {
-        log.info("Received target block is older than old one, re-scheduling asking for new one")
-        reScheduleAskForNewPivot(updateReason)
-      }
+      updatePivotSyncState(updateReason, pivotBlockHeader)
+      context become this.receive
+      processSyncing()
+
+    case PivotBlockSelector.Result(pivotBlockHeader)
+        if !newPivotIsGoodEnough(pivotBlockHeader, syncState, updateReason) =>
+      log.info("Received pivot block is older than old one, re-scheduling asking for new one")
+      reScheduleAskForNewPivot(updateReason)

     case PersistSyncState => persistSyncState()

     case UpdatePivotBlock(state) => updatePivotBlock(state)
   }

-  private def updatePivotBlock(state: PivotBlockUpdateReason): Unit = {
+  private def updatePivotBlock(updateReason: PivotBlockUpdateReason): Unit = {
     if (syncState.pivotBlockUpdateFailures <= syncConfig.maximumTargetUpdateFailures) {
       if (assignedHandlers.nonEmpty || syncState.blockChainWorkQueued) {
         log.info(s"Still waiting for some responses, rescheduling pivot block update")
-        scheduler.scheduleOnce(1.second, self, UpdatePivotBlock(state))
+        scheduler.scheduleOnce(1.second, self, UpdatePivotBlock(updateReason))
         processSyncing()
       } else {
-        askForPivotBlockUpdate(state)
+        askForPivotBlockUpdate(updateReason)
       }
     } else {
       log.warning(s"Sync failure! Number of pivot block update failures reached maximum.")

@@ -253,8 +262,9 @@ class FastSync(
     // An empty root hash means that there were no transactions in the blockchain, and the MPT trie is empty.
     // Asking for this root would result only in empty transactions.
     if (syncState.pivotBlock.stateRoot == ByteString(MerklePatriciaTrie.EmptyRootHash)) {
-      syncState = syncState.copy(stateSyncFinished = true)
+      syncState = syncState.copy(stateSyncFinished = true, updatingPivotBlock = false)
     } else {
+      syncState = syncState.copy(updatingPivotBlock = false)
       stateSyncRestartRequested = false
       syncStateScheduler ! StartSyncingTo(pivotBlockHeader.stateRoot, pivotBlockHeader.number)
     }

@@ -276,7 +286,7 @@ class FastSync(
       syncState =
         syncState.updatePivotBlock(pivotBlockHeader, syncConfig.fastSyncBlockValidationX, updateFailures = true)

-    case NodeRestart =>
+    case SyncRestart =>
       // in case of restart we are sure that the new pivotBlockHeader > current pivotBlockHeader
       syncState = syncState.updatePivotBlock(
         pivotBlockHeader,

@@ -541,7 +551,7 @@ class FastSync(
     (info.maxBlockNumber - syncConfig.pivotBlockOffset) - state.pivotBlock.number >= syncConfig.maxPivotBlockAge
   }

-  private def getPeerWithTooFreshNewBlock(
+  private def getPeersWithFreshEnoughPivot(
       peers: NonEmptyList[(Peer, PeerInfo)],
       state: SyncState,
       syncConfig: SyncConfig

@@ -569,7 +579,7 @@ class FastSync(
       (peerWithBestBlockInNetwork._2.maxBlockNumber - syncConfig.pivotBlockOffset) - syncState.pivotBlock.number

     val peersWithTooFreshPossiblePivotBlock =
-      getPeerWithTooFreshNewBlock(NonEmptyList.fromListUnsafe(currentPeers), syncState, syncConfig)
+      getPeersWithFreshEnoughPivot(NonEmptyList.fromListUnsafe(currentPeers), syncState, syncConfig)

     if (peersWithTooFreshPossiblePivotBlock.isEmpty) {
       log.info(

@@ -830,7 +840,8 @@ object FastSync {
       copy(
         pivotBlock = newPivot,
         safeDownloadTarget = newPivot.number + numberOfSafeBlocks,
-        pivotBlockUpdateFailures = if (updateFailures) pivotBlockUpdateFailures + 1 else pivotBlockUpdateFailures
+        pivotBlockUpdateFailures = if (updateFailures) pivotBlockUpdateFailures + 1 else pivotBlockUpdateFailures,
+        updatingPivotBlock = false
       )

   def isBlockchainWorkFinished: Boolean = {

@@ -861,14 +872,14 @@ object FastSync {
   case object ImportedPivotBlock extends HeaderProcessingResult

   sealed abstract class PivotBlockUpdateReason {
-    def nodeRestart: Boolean = this match {
+    def isSyncRestart: Boolean = this match {
       case ImportedLastBlock => false
       case LastBlockValidationFailed => false
-      case NodeRestart => true
+      case SyncRestart => true
     }
   }

   case object ImportedLastBlock extends PivotBlockUpdateReason
   case object LastBlockValidationFailed extends PivotBlockUpdateReason
-  case object NodeRestart extends PivotBlockUpdateReason
+  case object SyncRestart extends PivotBlockUpdateReason
 }
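The core behavioural refactor above replaces a nested if/else inside one case with two mutually exclusive guarded patterns, backed by named predicates (stalePivotAfterRestart, newPivotIsGoodEnough). A self-contained sketch of the same technique, with simplified stand-in names for PivotBlockSelector.Result and the predicate:

object GuardedReceiveSketch extends App {
  final case class Result(pivotNumber: BigInt)

  val currentPivot = BigInt(100)

  // Named predicate, analogous to newPivotIsGoodEnough in the diff.
  def goodEnough(r: Result): Boolean = r.pivotNumber >= currentPivot

  // Two guarded cases replace `case Result(r) => if (...) ... else ...`;
  // each outcome now reads directly off the pattern match.
  val handler: PartialFunction[Any, String] = {
    case r: Result if goodEnough(r)  => s"accepting new pivot ${r.pivotNumber}"
    case r: Result if !goodEnough(r) => "pivot is stale, re-scheduling"
  }

  println(handler(Result(120))) // accepting new pivot 120
  println(handler(Result(80)))  // pivot is stale, re-scheduling
}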

src/main/scala/io/iohk/ethereum/blockchain/sync/SyncController.scala

Lines changed: 23 additions & 14 deletions

@@ -19,8 +19,8 @@ class SyncController(
     ommersPool: ActorRef,
     etcPeerManager: ActorRef,
     syncConfig: SyncConfig,
-    externalSchedulerOpt: Option[Scheduler] = None)
-    extends Actor
+    externalSchedulerOpt: Option[Scheduler] = None
+) extends Actor
     with ActorLogging {

   import SyncController._

@@ -29,8 +29,8 @@ class SyncController(
   override def receive: Receive = idle

-  def idle: Receive = {
-    case Start => start()
+  def idle: Receive = { case Start =>
+    start()
   }

   def runningFastSync(fastSync: ActorRef): Receive = {

@@ -41,8 +41,8 @@ class SyncController(
     case other => fastSync.forward(other)
   }

-  def runningRegularSync(regularSync: ActorRef): Receive = {
-    case other => regularSync.forward(other)
+  def runningRegularSync(regularSync: ActorRef): Receive = { case other =>
+    regularSync.forward(other)
   }

   def start(): Unit = {

@@ -54,15 +54,17 @@ class SyncController(
         startFastSync()
       case (true, true) =>
         log.warning(
-          s"do-fast-sync is set to $doFastSync but fast sync cannot start because it has already been completed")
+          s"do-fast-sync is set to $doFastSync but fast sync cannot start because it has already been completed"
+        )
         startRegularSync()
       case (true, false) =>
         startRegularSync()
       case (false, false) =>
         //Check whether fast sync was started before
         if (fastSyncStateStorage.getSyncState().isDefined) {
           log.warning(
-            s"do-fast-sync is set to $doFastSync but regular sync cannot start because fast sync hasn't completed")
+            s"do-fast-sync is set to $doFastSync but regular sync cannot start because fast sync hasn't completed"
+          )
           startFastSync()
         } else
           startRegularSync()

@@ -79,14 +81,17 @@ class SyncController(
         peerEventBus,
         etcPeerManager,
         syncConfig,
-        scheduler),
-      "fast-sync")
+        scheduler
+      ),
+      "fast-sync"
+    )
     fastSync ! FastSync.Start
     context become runningFastSync(fastSync)
   }

   def startRegularSync(): Unit = {
-    val peersClient = context.actorOf(PeersClient.props(etcPeerManager, peerEventBus, syncConfig, scheduler), "peers-client")
+    val peersClient =
+      context.actorOf(PeersClient.props(etcPeerManager, peerEventBus, syncConfig, scheduler), "peers-client")
     val regularSync = context.actorOf(
       RegularSync.props(
         peersClient,

@@ -97,7 +102,8 @@ class SyncController(
         syncConfig,
         ommersPool,
         pendingTransactionsManager,
-        scheduler),
+        scheduler
+      ),
       "regular-sync"
     )
     regularSync ! RegularSync.Start

@@ -118,7 +124,8 @@ object SyncController {
       pendingTransactionsManager: ActorRef,
       ommersPool: ActorRef,
       etcPeerManager: ActorRef,
-      syncConfig: SyncConfig): Props =
+      syncConfig: SyncConfig
+  ): Props =
     Props(
       new SyncController(
         appStateStorage,

@@ -130,7 +137,9 @@ object SyncController {
         pendingTransactionsManager,
         ommersPool,
         etcPeerManager,
-        syncConfig))
+        syncConfig
+      )
+    )

   case object Start
 }
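The SyncController changes are formatting only (closing parentheses on their own lines, single-case lambdas); they compile to the same code because Akka's Receive is an alias for PartialFunction[Any, Unit], so a one-case handler can carry the case on the opening brace. A hedged, self-contained sketch — Start and start() are stand-ins for the real protocol, and an akka-actor dependency is assumed:

import akka.actor.{Actor, ActorSystem, Props}

object ReceiveLayoutSketch extends App {
  case object Start

  class Controller extends Actor {
    // Both layouts denote the same PartialFunction literal:
    //   def idle: Receive = { case Start => start() }
    def idle: Receive = { case Start =>
      start()
    }

    def start(): Unit = println("started")

    override def receive: Receive = idle
  }

  val system = ActorSystem("sketch")
  system.actorOf(Props(new Controller)) ! Start
  Thread.sleep(100) // give the actor a moment to process before shutdown
  system.terminate()
}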

src/main/scala/io/iohk/ethereum/blockchain/sync/SyncStateScheduler.scala

Lines changed: 5 additions & 13 deletions

@@ -110,13 +110,12 @@ class SyncStateScheduler(blockchain: Blockchain, bloomFilter: BloomFilter[ByteString])
     // complex due to pruning.
     val (nodes, newState) = state.getNodesToPersist
     nodes.foreach { case (hash, (data, reqType)) =>
+      bloomFilter.put(hash)
       reqType match {
         case _: CodeRequest =>
           blockchain.storeEvmCode(hash, data).commit()
-          bloomFilter.put(hash)
         case _: NodeRequest =>
           blockchain.saveNode(hash, data.toArray, targetBlockNumber)
-          bloomFilter.put(hash)
       }
     }
     newState

@@ -245,17 +244,10 @@ class SyncStateScheduler(blockchain: Blockchain, bloomFilter: BloomFilter[ByteString])
   }

   private def isRequestedHashAlreadyCommitted(state: SchedulerState, req: StateNodeRequest): Boolean = {
-    if (state.memBatch.contains(req.nodeHash)) {
-      true
-    } else {
-      if (bloomFilter.mightContain(req.nodeHash)) {
-        // hash might by in db double check
-        isInDatabase(req)
-      } else {
-        // hash is definitely not known
-        false
-      }
-    }
+    state.memBatch.contains(req.nodeHash) ||
+    (bloomFilter.mightContain(req.nodeHash) && isInDatabase(
+      req
+    )) // if hash is in bloom filter we need to double check on db
   }
 }
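The rewrite of isRequestedHashAlreadyCommitted relies on short-circuit evaluation: || returns early on an in-memory hit, and && only reaches the database when the bloom filter reports a possible hit (bloom filters can false-positive, never false-negative). A sketch of the same check using Guava's BloomFilter, with memBatch and isInDatabase as stand-ins for the real state:

import com.google.common.hash.{BloomFilter, Funnels}

object BloomShortCircuitSketch extends App {
  val bloomFilter: BloomFilter[Array[Byte]] =
    BloomFilter.create(Funnels.byteArrayFunnel(), 1000)

  val memBatch = Set.empty[Seq[Byte]] // stand-in for state.memBatch

  def isInDatabase(hash: Array[Byte]): Boolean = {
    println("db double check") // stand-in for the real lookup
    false
  }

  def alreadyCommitted(hash: Array[Byte]): Boolean =
    memBatch.contains(hash.toSeq) ||
      (bloomFilter.mightContain(hash) && isInDatabase(hash))

  val h = Array[Byte](1, 2, 3)
  println(alreadyCommitted(h)) // false; filter is empty, so the db is never touched
  bloomFilter.put(h)
  println(alreadyCommitted(h)) // a bloom hit forces the db double check
}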

src/main/scala/io/iohk/ethereum/utils/Config.scala

Lines changed: 3 additions & 3 deletions

@@ -167,9 +167,9 @@ object Config {
       fastSyncBlockValidationX = syncConfig.getInt("fast-sync-block-validation-x"),
       maxTargetDifference = syncConfig.getInt("max-target-difference"),
       maximumTargetUpdateFailures = syncConfig.getInt("maximum-target-update-failures"),
-      stateSyncBloomFilterSize = syncConfig.getInt("state-sync-bloomFilter-size"),
-      stateSyncPersistBatchSize = syncConfig.getInt("state-sync-persistBatch-size"),
-      pivotBlockReScheduleInterval = syncConfig.getDuration("pivot-block-reSchedule-interval").toMillis.millis,
+      stateSyncBloomFilterSize = syncConfig.getInt("state-sync-bloom-filter-size"),
+      stateSyncPersistBatchSize = syncConfig.getInt("state-sync-persist-batch-size"),
+      pivotBlockReScheduleInterval = syncConfig.getDuration("pivot-block-reschedule-interval").toMillis.millis,
       maxPivotBlockAge = syncConfig.getInt("max-pivot-block-age")
     )
   }

src/test/resources/application.conf

Lines changed: 3 additions & 3 deletions

@@ -118,11 +118,11 @@ mantis {
     min-peers-to-choose-pivot-block = 2
     peers-to-choose-pivot-block-margin = 1
     pivot-block-offset = 500
-    state-sync-bloomFilter-size = 20000
+    state-sync-bloom-filter-size = 20000

-    state-sync-persistBatch-size = 10000
+    state-sync-persist-batch-size = 10000

-    pivot-block-reSchedule-interval = 15.seconds
+    pivot-block-reschedule-interval = 15.seconds

     max-pivot-block-age = 96
   }
