Commit bcfc6e5

Merge remote-tracking branch 'origin/develop' into etcm-102/fast-sync-integration-test
2 parents: 851b9fc + 57b23ac

25 files changed: +695 -219 lines

README.md

Lines changed: 11 additions & 0 deletions

@@ -43,6 +43,17 @@ in the root of the project.
 
 This updates all submodules and creates a distribution zip in `~/target/universal/`.
 
+### Monitoring
+
+#### Locally build & run monitoring client
+
+```
+# Build the monitoring client docker image
+projectRoot $ docker build -f ./docker/monitoring-client.Dockerfile -t mantis-monitoring-client ./docker/
+# Run the monitoring client at http://localhost:9090
+projectRoot $ docker run --network=host mantis-monitoring-client
+```
+
 ### Feedback
 
 Feedback gratefully received through the Ethereum Classic Forum (http://forum.ethereumclassic.org/)

build.sbt

Lines changed: 13 additions & 10 deletions

@@ -6,7 +6,7 @@ import scala.sys.process.Process
 val nixBuild = sys.props.isDefinedAt("nix")
 
 val commonSettings = Seq(
-  name := "mantis",
+  name := "mantis-core",
   version := "3.0",
   scalaVersion := "2.12.12",
   testOptions in Test += Tests
@@ -51,8 +51,6 @@ val dep = {
     "net.java.dev.jna" % "jna" % "4.5.1",
     "org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.0",
     "com.github.scopt" %% "scopt" % "3.7.0",
-    // Metrics (https://github.com/DataDog/java-dogstatsd-client)
-    "com.datadoghq" % "java-dogstatsd-client" % "2.5",
     "org.xerial.snappy" % "snappy-java" % "1.1.7.2",
     // Logging
     "ch.qos.logback" % "logback-classic" % "1.2.3",
@@ -87,14 +85,19 @@ val root = {
     .configs(Integration, Benchmark, Evm, Ets, Snappy, Rpc)
     .settings(commonSettings: _*)
     .settings(
-      libraryDependencies ++= dep
+      libraryDependencies ++= Seq(
+        dep,
+        Dependencies.micrometer,
+        Dependencies.prometheus
+      ).flatten
     )
-    .settings(inConfig(Integration)(Defaults.testSettings): _*)
-    .settings(inConfig(Benchmark)(Defaults.testSettings): _*)
-    .settings(inConfig(Evm)(Defaults.testSettings): _*)
-    .settings(inConfig(Ets)(Defaults.testSettings): _*)
-    .settings(inConfig(Snappy)(Defaults.testSettings): _*)
-    .settings(inConfig(Rpc)(Defaults.testSettings): _*)
+    .settings(executableScriptName := name.value)
+    .settings(inConfig(Integration)(Defaults.testSettings) : _*)
+    .settings(inConfig(Benchmark)(Defaults.testSettings) : _*)
+    .settings(inConfig(Evm)(Defaults.testSettings) : _*)
+    .settings(inConfig(Ets)(Defaults.testSettings) : _*)
+    .settings(inConfig(Snappy)(Defaults.testSettings) : _*)
+    .settings(inConfig(Rpc)(Defaults.testSettings) : _*)
 
 if (!nixBuild)
   root
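
The `Seq(dep, Dependencies.micrometer, Dependencies.prometheus).flatten` expression above works because each element is itself a `Seq[ModuleID]`. A minimal, illustrative sketch of the same pattern (the group contents here are placeholders, not the project's full dependency lists):

```
// Illustrative only: libraryDependencies expects a Seq[ModuleID]; wrapping several
// Seq[ModuleID] groups in an outer Seq gives a Seq[Seq[ModuleID]], and .flatten
// collapses it back into the flat Seq[ModuleID] that sbt needs.
import sbt._

val groupA: Seq[ModuleID] = Seq("io.micrometer" % "micrometer-core" % "1.0.4")
val groupB: Seq[ModuleID] = Seq("io.prometheus" % "simpleclient" % "0.8.0")
val combined: Seq[ModuleID] = Seq(groupA, groupB).flatten // same shape as the build.sbt change
```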

docker/monitoring-client.Dockerfile

Lines changed: 2 additions & 0 deletions

@@ -0,0 +1,2 @@
+FROM prom/prometheus
+ADD ./prometheus.yml /etc/prometheus/

docker/prometheus.yml

Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+# Please don't use any default port allocations.
+# https://github.com/prometheus/prometheus/wiki/Default-port-allocations
+global:
+  scrape_interval: 1m
+  scrape_timeout: 10s
+  evaluation_interval: 1m
+scrape_configs:
+  - job_name: prometheus
+    honor_timestamps: true
+    scrape_interval: 5s
+    scrape_timeout: 5s
+    metrics_path: /metrics
+    scheme: http
+    static_configs:
+      - targets:
+          - localhost:9090
+        labels:
+          alias: prometheus
+  - job_name: node
+    honor_timestamps: true
+    scrape_interval: 10s
+    scrape_timeout: 10s
+    metrics_path: /metrics
+    scheme: http
+    static_configs:
+      - targets:
+          - localhost:13798
+        labels:
+          alias: mantis-node
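
The `node` job above scrapes `localhost:13798/metrics`, which matches the `mantis.metrics.port` default introduced later in this commit's `application.conf` change. As a rough sketch of how such an endpoint could be exposed from the JVM side (assumed wiring for illustration, not necessarily how Mantis does it), the `simpleclient_httpserver` dependency added in `project/Dependencies.scala` provides an HTTP server that serves the default collector registry, which the `PrometheusMeterRegistry` built in `MeterRegistryBuilder` also writes to:

```
// Minimal sketch (assumed wiring): serve /metrics for the scrape job above.
import io.prometheus.client.exporter.HTTPServer

object MetricsHttpEndpoint {
  // Starts an HTTP server that exposes CollectorRegistry.defaultRegistry at /metrics.
  def start(port: Int = 13798): HTTPServer =
    new HTTPServer(port)
}
```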

project/Dependencies.scala

Lines changed: 27 additions & 0 deletions

@@ -0,0 +1,27 @@
+import sbt._
+
+object Dependencies {
+
+  val prometheus: Seq[ModuleID] = {
+    val provider = "io.prometheus"
+    val version = "0.8.0"
+    Seq(
+      provider % "simpleclient" % version,
+      provider % "simpleclient_logback" % version,
+      provider % "simpleclient_hotspot" % version,
+      provider % "simpleclient_httpserver" % version
+    )
+  }
+
+  val micrometer: Seq[ModuleID] = {
+    val provider = "io.micrometer"
+    val version = "1.0.4"
+    Seq(
+      // Required to compile the metrics library https://github.com/micrometer-metrics/micrometer/issues/1133#issuecomment-452434205
+      "com.google.code.findbugs" % "jsr305" % "3.0.2" % Optional,
+      provider % "micrometer-core" % version,
+      provider % "micrometer-registry-jmx" % version,
+      provider % "micrometer-registry-prometheus" % version
+    )
+  }
+}

repo.nix

Lines changed: 312 additions & 16 deletions
Large diffs are not rendered by default.

src/main/resources/application.conf

Lines changed: 3 additions & 26 deletions

@@ -504,35 +504,12 @@ mantis {
 
   metrics {
     # Set to `true` iff your deployment supports metrics collection.
-    # We push metrics to a StatsD-compatible agent and we use Datadog for collecting them in one place.
+    # We expose metrics using a Prometheus server
     # We default to `false` here because we do not expect all deployments to support metrics collection.
     enabled = false
 
-    # The StatsD-compatible agent host.
-    host = "localhost"
-
-    # The StatsD-compatible agent port.
-    port = 8125
-
-    # All metrics sent will be tagged with the environment.
-    # Sample values are: `public`, `private`, `production`, `dev`, depending on the use-case.
-    # If metrics are `enabled`, this is mandatory and must be set explicitly to a non-null value.
-    #
-    environment = ""
-
-    # All metrics sent will be tagged with the deployment.
-    # Sample values are: `kevm-testnet`, `iele-testnet`, `raft-kevm`, depending on the use-case.
-    # If metrics are `enabled`, this is mandatory and must be set explicitly to a non-null value.
-    #
-    deployment = ""
-
-    # Size of the metrics requests queue.
-    # If the queue contains that many outstanding requests to the metrics agent, then
-    # subsequent requests are blocked until the queue has room again.
-    queue-size = 1024
-
-    # Iff true, any errors during metrics client operations will be logged.
-    log-errors = true
+    # The port for setting up a Prometheus server over localhost.
+    port = 13798
   }
 
   logging {
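
For reference, a minimal sketch of how the `mantis.metrics` block above could be read with Typesafe Config (the `MetricsSettings` name and loader are hypothetical; Mantis has its own configuration loading):

```
// Hypothetical reader for the mantis.metrics block shown above.
import com.typesafe.config.{Config, ConfigFactory}

final case class MetricsSettings(enabled: Boolean, port: Int)

object MetricsSettings {
  def load(root: Config = ConfigFactory.load()): MetricsSettings = {
    val cfg = root.getConfig("mantis.metrics")
    MetricsSettings(
      enabled = cfg.getBoolean("enabled"),
      port = cfg.getInt("port") // 13798 by default, matching docker/prometheus.yml
    )
  }
}
```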

src/main/resources/logback.xml

Lines changed: 3 additions & 0 deletions

@@ -41,6 +41,8 @@
     </encoder>
   </appender>
 
+  <appender name="METRICS" class="io.prometheus.client.logback.InstrumentedAppender" />
+
   <root level="DEBUG">
     <if condition='p("ASJSON").contains("true")'>
       <then>
@@ -51,6 +53,7 @@
       </else>
     </if>
     <appender-ref ref="FILE" />
+    <appender-ref ref="METRICS" />
   </root>
 
   <logger name="io.iohk.ethereum.network.rlpx.RLPxConnectionHandler" level="INFO" />
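
The `InstrumentedAppender` comes from the `simpleclient_logback` dependency and counts log events by level in Prometheus. For completeness, a hedged sketch of the programmatic equivalent of the XML wiring above (illustration only; the project configures it purely through `logback.xml`):

```
// Sketch: attach the Prometheus InstrumentedAppender to the root logger in code,
// mirroring the <appender name="METRICS" .../> declaration above.
import ch.qos.logback.classic.{Logger => LogbackLogger, LoggerContext}
import io.prometheus.client.logback.InstrumentedAppender
import org.slf4j.{Logger => Slf4jLogger, LoggerFactory}

object MetricsLogging {
  def install(): Unit = {
    val context = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
    val root: LogbackLogger = context.getLogger(Slf4jLogger.ROOT_LOGGER_NAME)
    val appender = new InstrumentedAppender()
    appender.setContext(context)
    appender.start()
    root.addAppender(appender)
  }
}
```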

src/main/scala/io/iohk/ethereum/domain/BlockBody.scala

Lines changed: 4 additions & 0 deletions

@@ -10,6 +10,10 @@ case class BlockBody(transactionList: Seq[SignedTransaction], uncleNodesList: Se
       |uncleNodesList: $uncleNodesList
       |}
     """.stripMargin
+
+  lazy val numberOfTxs: Int = transactionList.size
+
+  lazy val numberOfUncles: Int = uncleNodesList.size
 }
 
 object BlockBody {

src/main/scala/io/iohk/ethereum/ledger/BlockImport.scala

Lines changed: 3 additions & 3 deletions

@@ -5,7 +5,6 @@ import io.iohk.ethereum.consensus.validators.BlockHeaderError.HeaderParentNotFou
 import io.iohk.ethereum.domain._
 import io.iohk.ethereum.ledger.BlockExecutionError.{ UnKnownExecutionError, ValidationBeforeExecError }
 import io.iohk.ethereum.ledger.BlockQueue.Leaf
-import io.iohk.ethereum.metrics.{ Metrics, MetricsClient }
 import io.iohk.ethereum.utils.{ BlockchainConfig, Logger }
 import org.bouncycastle.util.encoders.Hex
 
@@ -68,8 +67,9 @@
       })
 
     if (importedBlocks.nonEmpty) {
-      val maxNumber = importedBlocks.map(_.block.header.number).max.toLong
-      MetricsClient.get().gauge(Metrics.LedgerImportBlockNumber, maxNumber)
+      importedBlocks.map(
+        blockData => BlockMetrics.measure(blockData.block, blockchain.getBlockByHash)
+      )
     }
 
     result

src/main/scala/io/iohk/ethereum/ledger/BlockMetrics.scala

Lines changed: 42 additions & 0 deletions

@@ -0,0 +1,42 @@
+package io.iohk.ethereum.ledger
+
+import akka.util.ByteString
+import com.google.common.util.concurrent.AtomicDouble
+import io.iohk.ethereum.domain.Block
+import io.iohk.ethereum.metrics.MetricsContainer
+
+case object BlockMetrics extends MetricsContainer {
+
+  private[this] final val BlockNumberGauge =
+    metrics.registry.gauge("sync.block.number.gauge", new AtomicDouble(0d))
+  private[this] final val BlockGasLimitGauge =
+    metrics.registry.gauge("sync.block.gasLimit.gauge", new AtomicDouble(0d))
+  private[this] final val BlockGasUsedGauge =
+    metrics.registry.gauge("sync.block.gasUsed.gauge", new AtomicDouble(0d))
+  private[this] final val BlockDifficultyGauge =
+    metrics.registry.gauge("sync.block.difficulty.gauge", new AtomicDouble(0d))
+  private[this] final val BlockTransactionsGauge =
+    metrics.registry.gauge("sync.block.transactions.gauge", new AtomicDouble(0d))
+  private[this] final val BlockUnclesGauge =
+    metrics.registry.gauge("sync.block.uncles.gauge", new AtomicDouble(0d))
+  private[this] final val TimeBetweenParentGauge =
+    metrics.registry.gauge("sync.block.timeBetweenParent.seconds.gauge", new AtomicDouble(0d))
+
+  def measure(block: Block, getBlockByHashFn: ByteString => Option[Block]): Unit = {
+    BlockNumberGauge.set(block.number.toDouble)
+    BlockGasLimitGauge.set(block.header.gasLimit.toDouble)
+    BlockGasUsedGauge.set(block.header.gasUsed.toDouble)
+    BlockDifficultyGauge.set(block.header.difficulty.toDouble)
+    BlockTransactionsGauge.set(block.body.numberOfTxs)
+    BlockUnclesGauge.set(block.body.numberOfUncles)
+
+    getBlockByHashFn(block.header.parentHash) match {
+      case Some(parentBlock) =>
+        val timeBetweenBlocksInSeconds: Long =
+          block.header.unixTimestamp - parentBlock.header.unixTimestamp
+        TimeBetweenParentGauge.set(timeBetweenBlocksInSeconds)
+      case None => ()
+    }
+  }
+
+}
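
`metrics.registry.gauge(name, stateObject)` is the Micrometer pattern of registering a gauge backed by a mutable number: the call returns the state object, and later `set(...)` calls are what the registry samples. A standalone sketch of the same idea (using `SimpleMeterRegistry` purely for illustration; Mantis uses the composite registry built by `MeterRegistryBuilder`):

```
// Illustration of the state-object gauge pattern used by BlockMetrics above.
import com.google.common.util.concurrent.AtomicDouble
import io.micrometer.core.instrument.simple.SimpleMeterRegistry

object GaugeSketch extends App {
  val registry = new SimpleMeterRegistry()

  // gauge(...) registers the meter and returns the state object for later updates.
  val blockNumber: AtomicDouble = registry.gauge("sync.block.number.gauge", new AtomicDouble(0d))

  blockNumber.set(1234d) // the registry reports 1234.0 the next time it samples the gauge
  println(blockNumber.get())
}
```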

src/main/scala/io/iohk/ethereum/metrics/AppJmxConfig.scala

Lines changed: 11 additions & 0 deletions

@@ -0,0 +1,11 @@
+package io.iohk.ethereum.metrics
+
+import io.micrometer.jmx.JmxConfig
+
+class AppJmxConfig extends JmxConfig {
+  override def get(key: String): String = null
+
+  override def prefix(): String = Metrics.MetricsPrefix
+
+  override def domain(): String = prefix()
+}

src/main/scala/io/iohk/ethereum/metrics/DeltaSpikeGauge.scala

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+package io.iohk.ethereum.metrics
+
+import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
+
+/**
+ * A gauge that starts at `0` and can be triggered to go to `1`.
+ * Next time it is sampled, it goes back to `0`.
+ * This is normally used for either one-off signals (e.g. when an application starts)
+ * or slowly re-appearing signals. Specifically, the sampling rate must be greater
+ * than the rate at which the signal is triggered.
+ */
+class DeltaSpikeGauge(name: String, metrics: Metrics) {
+  private[this] final val isTriggeredRef = new AtomicBoolean(false)
+  private[this] final val valueRef = new AtomicInteger(0)
+
+  private[this] def getValue(): Double = {
+    if (isTriggeredRef.compareAndSet(true, false)) {
+      valueRef.getAndSet(0)
+    } else {
+      valueRef.get()
+    }
+  }
+
+  private[this] final val gauge = metrics.gauge(name, () => getValue())
+
+  def trigger(): Unit = {
+    if (isTriggeredRef.compareAndSet(false, true)) {
+      valueRef.set(1)
+      // Let one of the exporting metric registries pick up the `1`.
+      // As soon as that happens, `getValue` will make sure that we go back to `0`.
+    }
+  }
+}
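
A sketch of the intended usage, assuming a `Metrics` instance whose `gauge(name, fn)` registers a function-backed gauge (the `Metrics` class itself is not part of this diff, so the wiring and names below are assumptions):

```
// Hypothetical usage of DeltaSpikeGauge for a one-off "application started" signal.
import io.iohk.ethereum.metrics.{DeltaSpikeGauge, Metrics}

class AppSignals(metrics: Metrics) {
  private val startedGauge = new DeltaSpikeGauge("app.start.gauge", metrics)

  def signalStarted(): Unit =
    startedGauge.trigger() // the next scrape reports 1, every later scrape reports 0
}
```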

src/main/scala/io/iohk/ethereum/metrics/MeterRegistryBuilder.scala

Lines changed: 57 additions & 0 deletions

@@ -0,0 +1,57 @@
+package io.iohk.ethereum.metrics
+
+import io.iohk.ethereum.utils.Logger
+import io.iohk.ethereum.utils.LoggingUtils.getClassName
+import io.micrometer.core.instrument.composite.CompositeMeterRegistry
+import io.micrometer.core.instrument.config.MeterFilter
+import io.micrometer.core.instrument._
+import io.micrometer.prometheus.{PrometheusMeterRegistry, PrometheusConfig}
+import io.prometheus.client.CollectorRegistry
+import io.micrometer.jmx.JmxMeterRegistry
+
+object MeterRegistryBuilder extends Logger {
+
+  private[this] final val StdMetricsClock = Clock.SYSTEM
+
+  private[this] def onMeterAdded(m: Meter): Unit =
+    log.debug(s"New ${getClassName(m)} metric: " + m.getId.getName)
+
+  /**
+   * Builds our meter registry by:
+   *  1. Creating each individual meter registry
+   *  2. Configuring the resulting composite
+   */
+  def build(metricsPrefix: String): MeterRegistry = {
+
+    val jmxMeterRegistry = new JmxMeterRegistry(new AppJmxConfig, StdMetricsClock)
+
+    log.info(s"Build JMX Meter Registry: ${jmxMeterRegistry}")
+
+    val prometheusMeterRegistry =
+      new PrometheusMeterRegistry(
+        PrometheusConfig.DEFAULT,
+        CollectorRegistry.defaultRegistry,
+        StdMetricsClock
+      )
+
+    log.info(s"Build Prometheus Meter Registry: ${prometheusMeterRegistry}")
+
+    val registry = new CompositeMeterRegistry(
+      StdMetricsClock,
+      java.util.Arrays.asList(jmxMeterRegistry, prometheusMeterRegistry)
+    )
+    // Ensure that all metrics have the `Prefix`.
+    // We are of course mainly interested in those that we do not control,
+    // e.g. those coming from `JvmMemoryMetrics`.
+    registry
+      .config()
+      .meterFilter(new MeterFilter {
+        override def map(id: Meter.Id): Meter.Id = {
+          id.withName(MetricsUtils.mkNameWithPrefix(metricsPrefix)(id.getName))
+        }
+      })
+      .onMeterAdded(onMeterAdded)
+
+    registry
+  }
+}
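
Since `build` returns a plain Micrometer `MeterRegistry`, standard binders and meters can be attached to the composite. A hedged sketch (the `MetricsWiring` object, prefix handling, and counter name are assumptions; the code comment in `build` only mentions `JvmMemoryMetrics` as an example of an externally-controlled meter source):

```
// Sketch: obtaining the composite registry and binding JVM metrics to it.
import io.iohk.ethereum.metrics.MeterRegistryBuilder
import io.micrometer.core.instrument.MeterRegistry
import io.micrometer.core.instrument.binder.jvm.JvmMemoryMetrics

object MetricsWiring {
  def wire(prefix: String): MeterRegistry = {
    val registry = MeterRegistryBuilder.build(prefix)
    new JvmMemoryMetrics().bindTo(registry) // meter names pick up `prefix` via the MeterFilter above
    registry.counter("blocks.imported.counter").increment()
    registry
  }
}
```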
