Update the QA project to use test cluster api #1430

Merged
merged 4 commits on Mar 9, 2020

Changes from 1 commit
org/elasticsearch/hadoop/gradle/fixture/hadoop/ConfigFormats.groovy

```diff
@@ -19,28 +19,30 @@
 
 package org.elasticsearch.hadoop.gradle.fixture.hadoop
 
+import static org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.SettingsContainer.FileSettings
+
 class ConfigFormats {
 
     static Closure<String> hadoopXML() {
-        return { Map conf ->
-            String props = conf.collect { key, value ->
+        return { FileSettings conf ->
+            String props = conf.resolve().collect { key, value ->
                 "<property>\n\t\t<name>${key}</name>\n\t\t<value>${value}</value>\n\t</property>"
             }.join("\n\t")
             return "<configuration>\n\t${props}\n</configuration>"
         }
     }
 
     static Closure<String> propertyFile() {
-        return { Map conf ->
-            conf.collect { key, value ->
+        return { FileSettings conf ->
+            conf.resolve().collect { key, value ->
                 "${key}=${value}"
             }.join("\n")
         }
     }
 
     static Closure<String> whiteSpaced() {
-        return { Map conf ->
-            conf.collect { key, value ->
+        return { FileSettings conf ->
+            conf.resolve().collect { key, value ->
                 "${key} ${value}"
             }.join("\n")
         }
```
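Aside: the formatter closures now accept the richer `FileSettings` type and flatten it with `resolve()` at render time, rather than receiving a pre-flattened `Map`. A minimal sketch of what the `hadoopXML` formatter emits, assuming only that `FileSettings.resolve()` returns a `Map<String, String>` (the `SettingsContainer.FileSettings` internals are not part of this diff, so the stub below is hypothetical):

```groovy
// Hypothetical stand-in for SettingsContainer.FileSettings; the only
// behavior assumed is that resolve() flattens to a Map<String, String>.
class FileSettingsStub {
    Map<String, String> settings
    Map<String, String> resolve() { settings }
}

// Same closure body as ConfigFormats.hadoopXML(), duck-typed to the stub.
def hadoopXML = { conf ->
    String props = conf.resolve().collect { key, value ->
        "<property>\n\t\t<name>${key}</name>\n\t\t<value>${value}</value>\n\t</property>"
    }.join("\n\t")
    return "<configuration>\n\t${props}\n</configuration>"
}

def conf = new FileSettingsStub(settings: ['fs.defaultFS': 'hdfs://localhost:9000'])
println hadoopXML(conf)
// Prints a Hadoop-style XML config with one <property> element for the entry above.
```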
org/elasticsearch/hadoop/gradle/fixture/hadoop/HadoopClusterFormationTasks.groovy

```diff
@@ -23,6 +23,7 @@ import org.apache.tools.ant.DefaultLogger
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.test.Fixture
+import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask
 import org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.HadoopClusterConfiguration
 import org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.InstanceConfiguration
 import org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.RoleConfiguration
@@ -42,18 +43,21 @@ import org.apache.tools.ant.taskdefs.condition.Os
 
 import java.nio.file.Paths
 
+import static org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.SettingsContainer.FileSettings
+
 /**
  * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster
  * when the task is finished.
  */
 class HadoopClusterFormationTasks {
 
     /**
-     * A start and stop task for a fixture
+     * A start and stop task for an instance, as well as any and all instance specific setup and teardown tasks
      */
-    static class TaskPair {
+    static class InstanceTasks {
         Task startTask
         Task stopTask
+        List<Task> allTasks
     }
 
     /**
@@ -99,20 +103,20 @@
         List<InstanceInfo> nodes = []
 
         // Create the fixtures for each service
-        List<TaskPair> clusterTaskPairs = []
+        List<InstanceTasks> clusterTaskPairs = []
         for (ServiceConfiguration serviceConfiguration : clusterConfiguration.getServices()) {
 
             // Get the download task for this service's package and add it to the service's dependency tasks
             DistributionTasks distributionTasks = getOrConfigureDistributionDownload(project, serviceConfiguration)
 
             // Keep track of the start tasks in this service
-            List<TaskPair> serviceTaskPairs = []
+            List<InstanceTasks> serviceTaskPairs = []
 
             // Create fixtures for each role in the service
             for (RoleConfiguration roleConfiguration : serviceConfiguration.getRoles()) {
 
                 // Keep track of the start tasks in this role
-                List<TaskPair> roleTaskPairs = []
+                List<InstanceTasks> roleTaskPairs = []
 
                 // Create fixtures for each instance in the role
                 for (InstanceConfiguration instanceConfiguration : roleConfiguration.getInstances()) {
@@ -136,7 +140,7 @@
                     nodes.add(instanceInfo)
 
                     // Create the tasks for the instance
-                    TaskPair instanceTasks
+                    InstanceTasks instanceTasks
                     try {
                         instanceTasks = configureNode(project, prefix, instanceDependencies, instanceInfo,
                                 distributionTasks)
@@ -168,6 +172,16 @@
                             }
                         }
                     }
+
+                    // Check to see if any of the instance tasks are test cluster aware, and if they are, set the
+                    // es cluster to be whichever cluster was configured, if any
+                    if (instanceInfo.elasticsearchCluster != null) {
+                        for (Task instanceTask : instanceTasks.allTasks) {
+                            if (instanceTask instanceof DefaultTestClustersTask) {
+                                ((DefaultTestClustersTask) instanceTask).useCluster(instanceInfo.elasticsearchCluster)
+                            }
+                        }
+                    }
                 }
                 // Make each task in the role depend on and also be finalized by each instance in the service.
                 List<Task> startTasks = roleTaskPairs.collect{it.startTask}
```
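Aside: `useCluster` is the hook a `TestClustersAware` task uses to claim an `ElasticsearchCluster`, so the testclusters plugin starts the cluster before the task executes and can stop it once the last claiming task is done. A rough build-script sketch of the pattern, with illustrative cluster and task names that are not from this PR:

```groovy
// Assumes the elasticsearch testclusters plugin is applied, which registers
// the testClusters container and the DefaultTestClustersTask base type.
testClusters {
    qaCluster {
        // version, settings, plugins, etc. would be configured here
    }
}

tasks.register('qaFixtureSetup', org.elasticsearch.gradle.testclusters.DefaultTestClustersTask) {
    useCluster testClusters.qaCluster   // claim the cluster for this task
    doLast {
        // By execution time the cluster is up; its address can be resolved
        // and written into fixture config files.
        println testClusters.qaCluster.allHttpSocketURI
    }
}
```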
```diff
@@ -234,31 +248,44 @@
         return new DistributionTasks(download: downloadTask, verify: verifyTask)
     }
 
-    static TaskPair configureNode(Project project, String prefix, Object dependsOn, InstanceInfo node,
-                                  DistributionTasks distribution) {
-        Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
+    static InstanceTasks configureNode(Project project, String prefix, Object dependsOn, InstanceInfo node,
+                                       DistributionTasks distribution) {
+        List<Task> instanceTasks = []
+
+        Task clean = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
             delete node.homeDir
             delete node.cwd
             group = 'hadoopFixture'
         }
+        instanceTasks.add(clean)
 
         // Only create CWD and check previous if the role is an executable process
+        Task lastInitTask = clean
         if (node.getConfig().getRoleDescriptor().isExecutableProcess()) {
-            setup = project.tasks.create(name: taskName(prefix, node, 'createCwd'), type: DefaultTask, dependsOn: setup) {
+            Task createCwd = project.tasks.create(name: taskName(prefix, node, 'createCwd'), type: DefaultTask, dependsOn: clean) {
                 doLast {
                     node.cwd.mkdirs()
                 }
                 outputs.dir node.cwd
                 group = 'hadoopFixture'
             }
-            setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
-            setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
+            Task checkPrevious = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, createCwd, node)
+            Task stopPrevious = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, checkPrevious, node)
+            lastInitTask = stopPrevious
+
+            instanceTasks.add(createCwd)
+            instanceTasks.add(checkPrevious)
+            instanceTasks.add(stopPrevious)
         }
 
         // Always extract the package contents, and configure the files
-        setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution)
-        setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node)
-        setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node)
+        Task extract = configureExtractTask(taskName(prefix, node, 'extract'), project, lastInitTask, node, distribution)
+        Task configure = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, extract, node)
+        Task extraConfig = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, configure, node)
+
+        instanceTasks.add(extract)
+        instanceTasks.add(configure)
+        instanceTasks.add(extraConfig)
 
         // If the role for this instance is not a process, we skip creating start and stop tasks for it.
         if (!node.getConfig().getRoleDescriptor().isExecutableProcess()) {
@@ -267,15 +294,16 @@
             for (Object dependency : node.config.getDependencies()) {
                 if (dependency instanceof Fixture) {
                     def depStop = ((Fixture)dependency).stopTask
-                    setup.finalizedBy(depStop)
+                    extraConfig.finalizedBy(depStop)
                 }
             }
-            return new TaskPair(startTask: setup)
+            return new InstanceTasks(startTask: extraConfig, allTasks: instanceTasks)
         }
 
         Map<String, Object[]> setupCommands = new LinkedHashMap<>()
         setupCommands.putAll(node.config.getServiceDescriptor().defaultSetupCommands(node.config))
         setupCommands.putAll(node.config.getSetupCommands())
+        Task lastSetupCommand = extraConfig
         for (Map.Entry<String, Object[]> command : setupCommands) {
             // the first argument is the actual script name, relative to home
             Object[] args = command.getValue().clone()
@@ -295,17 +323,21 @@
                 commandPath = node.homeDir.toPath().resolve(args[0].toString()).toString()
             }
             args[0] = commandPath
-            setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args)
+            lastSetupCommand = configureExecTask(taskName(prefix, node, command.getKey()), project, lastSetupCommand, node, args)
+            instanceTasks.add(lastSetupCommand)
         }
 
         // Configure daemon start task
-        Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node)
+        Task start = configureStartTask(taskName(prefix, node, 'start'), project, lastSetupCommand, node)
+        instanceTasks.add(start)
 
         // Configure wait task
         Task wait = configureWaitTask(taskName(prefix, node, 'wait'), project, node, start, 30)
+        instanceTasks.add(wait)
 
         // Configure daemon stop task
         Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node)
+        instanceTasks.add(stop)
 
         // We're running in the background, so make sure that the stop command is called after all cluster tasks finish
         wait.finalizedBy(stop)
@@ -319,7 +351,7 @@
                 stop.finalizedBy(depStop)
             }
         }
-        return new TaskPair(startTask: wait, stopTask: stop)
+        return new InstanceTasks(startTask: wait, stopTask: stop, allTasks: instanceTasks)
     }
 
     static Task configureCheckPreviousTask(String name, Project project, Task setup, InstanceInfo node) {
@@ -342,13 +374,13 @@
 
     static Task configureWriteConfigTask(String name, Project project, Task setup, InstanceInfo node) {
         // Add all node level configs to node Configuration
-        return project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) {
+        return project.tasks.create(name: name, type: DefaultTestClustersTask, dependsOn: setup) {
             group = 'hadoopFixture'
             doFirst {
                 // Write each config file needed
                 node.configFiles.forEach { configFile ->
                     String configName = configFile.getName()
-                    Map<String, String> configFileEntries = node.configContents.get(configName)
+                    FileSettings configFileEntries = node.configContents.get(configName)
                     if (configFileEntries == null) {
                         throw new GradleException("Could not find contents of [${configFile}] settings file from deployment options.")
                     }
@@ -387,7 +419,6 @@
         return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec ->
             exec.group = 'hadoopFixture'
             exec.workingDir node.cwd
-            exec.environment 'JAVA_HOME', node.getJavaHome()
             exec.environment(node.env)
 
             // Configure HADOOP_OPTS (or similar env) - adds system properties, assertion flags, remote debug etc
```
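Aside: the rewrite replaces the single reassigned `setup` variable with named task references (`clean`, `createCwd`, ..., `extraConfig`) purely so that every created task can also be recorded in `allTasks`; the linear dependency chain itself is unchanged. A stripped-down build-script sketch of the chain-and-collect pattern, with illustrative step names:

```groovy
// Each step depends on the previous one, and every task is also collected
// so a caller can post-process the whole set (e.g. the useCluster wiring above).
List<Task> allTasks = []
Task previous = null
['clean', 'extract', 'configure', 'start'].each { String step ->
    Task current = project.tasks.create(name: "fixture#${step}") {
        group = 'hadoopFixture'
    }
    if (previous != null) {
        current.dependsOn(previous)
    }
    allTasks.add(current)
    previous = current
}
```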
org/elasticsearch/hadoop/gradle/fixture/hadoop/InstanceInfo.groovy

```diff
@@ -20,7 +20,7 @@
 package org.elasticsearch.hadoop.gradle.fixture.hadoop
 
 import org.apache.tools.ant.taskdefs.condition.Os
-import org.elasticsearch.gradle.info.BuildParams
+import org.elasticsearch.gradle.testclusters.ElasticsearchCluster
 import org.elasticsearch.hadoop.gradle.util.WaitForURL
 import org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.InstanceConfiguration
 import org.gradle.api.GradleException
@@ -30,6 +30,8 @@ import java.nio.file.Path
 import java.nio.file.Paths
 import java.util.concurrent.TimeUnit
 
+import static org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.SettingsContainer.FileSettings
+
 /**
  * Generic information for any process running in a hadoop ecosystem.
  *
@@ -70,7 +72,7 @@
     /** The config files */
     List<File> configFiles
 
-    Map<String, Map<String, String>> configContents
+    Map<String, FileSettings> configContents
 
     /** Closure that renders the contents of the config file */
     Closure<String> configFileFormatter
@@ -84,8 +86,8 @@
     /** stdout/stderr log of the service process for this instance */
     File startLog
 
-    /** Major version of java this node runs with, or {@code null} if using the runtime java version */
-    Integer javaVersion
+    /** Location of the java installation to use when running processes **/
+    String javaHome
 
     /** environment variables to start the node with */
     Map<String, String> env
@@ -108,6 +110,9 @@
     /** buffer for ant output when starting this node */
     ByteArrayOutputStream buffer = new ByteArrayOutputStream()
 
+    /** Elasticsearch cluster dependency for tasks **/
+    ElasticsearchCluster elasticsearchCluster
+
     /**
      * A closure to call before the cluster is considered ready. The closure is passed the node info,
      * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
@@ -155,13 +160,16 @@
         startLog = new File(cwd, 'run.log')
 
         // We just default to the current runtime at this time
-        javaVersion = 8
+        javaHome = config.getJavaHome()
 
         // Prepare Environment
         env = [:]
         env.putAll(config.getEnvironmentVariables())
         config.getServiceDescriptor().finalizeEnv(env, config)
 
+        // Add JAVA_HOME to the environment
+        env['JAVA_HOME'] = javaHome
+
        // Prepare startup command and arguments
         args = []
         List<String> startCommandLine = config.getServiceDescriptor().startCommand(config)
@@ -202,6 +210,8 @@
         if (Os.isFamily(Os.FAMILY_WINDOWS)) {
             args.add('"') // end the entire command, quoted
         }
+
+        this.elasticsearchCluster = config.getElasticsearchCluster()
     }
 
     Path binPath() {
@@ -235,19 +245,13 @@
             throw new UnsupportedOperationException("JNAKernal32Library is compiled for Java 10 and up.")
         }
 
-    /** Return the java home used by this node. */
-    String getJavaHome() {
-        return javaVersion == null ? project.runtimeJavaHome : BuildParams.javaVersions.find { it.version == javaVersion }.javaHome.absolutePath
-    }
-
     /** Returns debug string for the command that started this node. */
     String getCommandString() {
         String commandString = "\nService ${config.serviceDescriptor.serviceName()}: ${config.roleDescriptor.roleName()} configuration:\n"
         commandString += "|-----------------------------------------\n"
         commandString += "| cwd: ${cwd}\n"
         commandString += "| command: ${executable} ${args.join(' ')}\n"
         commandString += '| environment:\n'
-        commandString += "| JAVA_HOME: ${javaHome}\n"
         env.each { k, v -> commandString += "| ${k}: ${v}\n" }
         commandString += "|\n| [${backgroundScript.name}]\n"
         backgroundScript.eachLine('UTF-8', { line -> commandString += " ${line}\n"})
```
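Aside: because `javaHome` is now a concrete path taken from the instance configuration and published through the regular `env` map, every process launched for the instance inherits `JAVA_HOME` like any other environment variable, which is what let the dedicated `exec.environment 'JAVA_HOME', ...` call in `HadoopClusterFormationTasks` be dropped. A tiny sketch of the effect, with hypothetical values:

```groovy
// Illustrative only: JAVA_HOME travels with the rest of the instance environment.
Map<String, String> env = [:]
env.putAll(['HADOOP_HEAPSIZE': '1024'])      // service-specific variables
env['JAVA_HOME'] = '/usr/lib/jvm/java-8'     // stands in for config.getJavaHome()

// Any launched process sees the complete map:
ProcessBuilder pb = new ProcessBuilder('bin/hdfs', 'namenode')
pb.environment().putAll(env)
```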
org/elasticsearch/hadoop/gradle/fixture/hadoop/ServiceDescriptor.groovy

```diff
@@ -24,6 +24,8 @@ import org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.InstanceConfiguration
 import org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.ServiceConfiguration
 import org.elasticsearch.hadoop.gradle.tasks.ApacheMirrorDownload
 
+import static org.elasticsearch.hadoop.gradle.fixture.hadoop.conf.SettingsContainer.FileSettings
+
 /**
  * Describes deployment characteristics for different Hadoop ecosystem projects.
  *
@@ -100,7 +102,7 @@
     /**
      * Collect all configuration entries, setting defaults for the service, role, and instance.
      */
-    Map<String, Map<String, String>> collectConfigFilesContents(InstanceConfiguration configuration)
+    Map<String, FileSettings> collectConfigFilesContents(InstanceConfiguration configuration)
 
     /**
      * Closure that formats a configuration map into a String for the config file contents.
      */
@@ -110,7 +112,7 @@
     /**
      * Produces the HTTP/S URI to reach the web front end for a running instance, or null if there is no web interface.
      */
-    String httpUri(InstanceConfiguration configuration, Map<String, Map<String, String>> configFileContents)
+    String httpUri(InstanceConfiguration configuration, Map<String, FileSettings> configFileContents)
 
     /**
      * The command line to use for starting the given role and instance.
```
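Aside: with the signature change, `httpUri` implementations receive structured `FileSettings` per config file. A hedged sketch of how a descriptor might derive a web UI address under the new signature, assuming `FileSettings.resolve()` returns a `Map<String, String>` as `ConfigFormats` relies on; the file name, property key, and fallback port are illustrative Hadoop examples:

```groovy
// Sketch of a ServiceDescriptor.httpUri implementation under the new types.
String httpUri(InstanceConfiguration configuration, Map<String, FileSettings> configFileContents) {
    Map<String, String> hdfsSite = configFileContents['hdfs-site.xml']?.resolve() ?: [:]
    // Fall back to the classic NameNode web UI port if the address is unset.
    String address = hdfsSite['dfs.namenode.http-address'] ?: 'localhost:50070'
    return "http://${address}"
}
```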