Set UserProvider before discovery in Spark SQL integrations #1934

Status: Merged (3 commits, merged Mar 29, 2022)
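The change applies one small pattern across the Spark SQL integrations: register a UserProvider before InitializationUtils.discoverClusterInfo runs, since discovery makes a round-trip to the cluster and, in secured (e.g. Kerberos) deployments, that first call presumably needs the right user already in place. A minimal sketch of the ordering, using the same InitializationUtils calls as the diffs below (the wrapper object and method names are illustrative, not part of the PR):

```scala
// Sketch only: the ordering this PR enforces at each Spark SQL entry point.
// `EsInit` and `prepare` are illustrative; the InitializationUtils calls are real.
import org.apache.commons.logging.LogFactory
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.mr.security.HadoopUserProvider
import org.elasticsearch.hadoop.rest.InitializationUtils

object EsInit {
  def prepare(settings: Settings): Settings = {
    val log = LogFactory.getLog(getClass)
    // Set the user provider first...
    InitializationUtils.setUserProviderIfNotSet(settings, classOf[HadoopUserProvider], log)
    // ...so cluster discovery runs with it already configured.
    InitializationUtils.discoverClusterInfo(settings, log)
    settings
  }
}
```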
6 changes: 4 additions & 2 deletions qa/kerberos/build.gradle
@@ -521,7 +521,7 @@ if (disableTests) {
// =============================================================================
// Spark Jobs
// =============================================================================

// Run the Spark job to load data to ES. Ensure Kerberos settings are available.
SparkApp sparkLoadData = config.createClusterTask('sparkLoadData', SparkApp.class) {
clusterConfiguration = config
@@ -682,7 +682,9 @@ if (disableTests) {

Map<String, Task> readJobs = [
    'mr': mrReadData,
-   'spark': sparkReadData,
+   'sparkRDD': sparkReadData,
+   'sparkDF': sparkReadData,
+   'sparkDS': sparkReadData,
    'hive': hiveReadData,
    'pig': pigReadData
]
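The three new keys line up with the RDD, DataFrame, and Dataset output directories that the reworked ReadFromES job writes (see its diff below), so each Spark write/read path now gets its own verification pass.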
AbstractClusterVerificationTests.java

@@ -40,7 +40,9 @@ public class AbstractClusterVerificationTests {
public static Collection<Object[]> params() {
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[]{"mr", "part-m-", 345, true});
-   params.add(new Object[]{"spark", "part-", 345, true});
+   params.add(new Object[]{"sparkRDD", "part-", 345, true});
+   params.add(new Object[]{"sparkDF", "part-", 345, true});
+   params.add(new Object[]{"sparkDS", "part-", 345, true});
    params.add(new Object[]{"hive", "000000_0", 345, false});
    params.add(new Object[]{"pig", "part-m-", 345, true});
    return params;
LoadToES.scala

@@ -20,11 +20,12 @@
package org.elasticsearch.hadoop.qa.kerberos.spark

import java.security.PrivilegedExceptionAction

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
+ import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.elasticsearch.hadoop.qa.kerberos.security.KeytabLogin
import org.elasticsearch.spark._
+ import org.elasticsearch.spark.sql._

class LoadToES(args: Array[String]) {

@@ -37,25 +38,16 @@ class LoadToES(args: Array[String]) {
}
val resource = sparkConf.get("spark.es.resource")
val fieldNames = sparkConf.get(LoadToES.CONF_FIELD_NAMES).split(",")
+ val schema = StructType(fieldNames.map(StructField(_, StringType)))

- val df = spark.sqlContext.read.textFile(args(0))
- val parsedData = df.rdd
-   .map(line => {
-     var record: Map[String, Object] = Map()
-     val fields = line.split('\t')
-     var fieldNum = 0
-     for (field <- fields) {
-       if (fieldNum < fieldNames.length) {
-         val fieldName = fieldNames(fieldNum)
-         record = record + (fieldName -> field)
-       }
-       fieldNum = fieldNum + 1
-     }
-     record
-   })
+ val df = spark.sqlContext.read
+   .schema(schema)
+   .option("sep", "\t")
+   .csv(args(0))

- parsedData.saveToEs(resource)
+ df.rdd.map(row => row.getValuesMap(row.schema.fieldNames)).saveToEs(s"${resource}_rdd")
+ df.saveToEs(s"${resource}_df")
+ df.write.format("es").save(s"${resource}_ds")
}
}
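The rewritten load job now pushes the same parsed input through all three write APIs. A condensed, self-contained version of the three paths (the SparkSession setup, input path, and index names are placeholders, not the QA job's):

```scala
// Condensed sketch of the three write paths above; paths and index
// names are illustrative.
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark._        // saveToEs on RDDs
import org.elasticsearch.spark.sql._    // saveToEs on DataFrames/Datasets

object ThreeWrites {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("three-writes").getOrCreate()
    val df = spark.read.option("sep", "\t").csv("input.tsv")
    // RDD API: one Map document per row, keyed by field name
    df.rdd.map(row => row.getValuesMap[String](row.schema.fieldNames)).saveToEs("qa_rdd")
    // DataFrame API
    df.saveToEs("qa_df")
    // DataSource API
    df.write.format("es").save("qa_ds")
    spark.stop()
  }
}
```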

ReadFromES.scala

@@ -25,6 +25,7 @@
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.elasticsearch.hadoop.qa.kerberos.security.KeytabLogin
import org.elasticsearch.spark._
+ import org.elasticsearch.spark.sql._

class ReadFromES(args: Array[String]) {

@@ -34,14 +35,22 @@ class ReadFromES(args: Array[String]) {
def run(): Unit = {
val resource = sparkConf.get("spark.es.resource")

- spark.sparkContext.esJsonRDD(resource).saveAsTextFile(args(0))
- // spark.sqlContext
- //   .read
- //   .format("es")
- //   .option("es.output.json", "true")
- //   .load(resource)
- //   .write
- //   .text(args(0))
+ // Expected directory names in :qa:kerberos:build.gradle readJobs
+ val rddOutputDir = s"${args(0)}RDD"
+ val dfOutputDir = s"${args(0)}DF"
+ val dsOutputDir = s"${args(0)}DS"
+
+ spark.sparkContext.esJsonRDD(s"${resource}_rdd").saveAsTextFile(rddOutputDir)
+
+ spark.sqlContext.esDF(s"${resource}_df")
+   .rdd
+   .map(row => row.toString())
+   .saveAsTextFile(dfOutputDir)
+
+ spark.sqlContext.read.format("es").load(s"${resource}_ds")
+   .rdd
+   .map(row => row.toString())
+   .saveAsTextFile(dsOutputDir)
}
}
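Note that the DataFrame read goes through esDF, which comes from the org.elasticsearch.spark.sql._ import added above, while the Dataset read uses the standard DataSource reader with the "es" format.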

DefaultSource.scala

@@ -151,6 +151,7 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @

@transient private[sql] lazy val cfg = {
val conf = new SparkSettingsManager().load(sqlContext.sparkContext.getConf).merge(parameters.asJava)
+ InitializationUtils.setUserProviderIfNotSet(conf, classOf[HadoopUserProvider], LogFactory.getLog(classOf[ElasticsearchRelation]))
InitializationUtils.discoverClusterInfo(conf, LogFactory.getLog(classOf[ElasticsearchRelation]))
conf
}
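The ordering is the substance of the fix: discoverClusterInfo already contacts the cluster, so the HadoopUserProvider presumably has to be registered before that first round-trip rather than after it, which is exactly the move the next hunk makes.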
@@ -533,10 +534,10 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @

// perform a scan-scroll delete
val cfgCopy = cfg.copy()
+ InitializationUtils.setUserProviderIfNotSet(cfgCopy, classOf[HadoopUserProvider], null)
Review comment (Member):
Should we just roll setUserProviderIfNotSet into discoverClusterInfo to keep us from accidentally calling discoverClusterInfo without it in the future?

@jbaiera (Member, Author), Mar 28, 2022:
I don't disagree, but I think the problems are rooted deeper than just this initialization call. Honestly, I think all of InitializationUtils in general needs to be reworked.
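For reference, a rough sketch of what the suggested consolidation might look like (a hypothetical helper, not actual elasticsearch-hadoop API):

```scala
// Hypothetical combined call, as floated in the thread above; the helper
// and its name are illustrative, not part of this PR or the library.
import org.apache.commons.logging.Log
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.mr.security.HadoopUserProvider
import org.elasticsearch.hadoop.rest.InitializationUtils

object InitHelper {
  // Guarantees a UserProvider exists before the discovery round-trip,
  // so call sites cannot get the ordering wrong.
  def discoverClusterInfoAsUser(settings: Settings, log: Log): Unit = {
    InitializationUtils.setUserProviderIfNotSet(settings, classOf[HadoopUserProvider], log)
    InitializationUtils.discoverClusterInfo(settings, log)
  }
}
```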

InitializationUtils.discoverClusterInfo(cfgCopy, Utils.LOGGER)
InitializationUtils.setValueWriterIfNotSet(cfgCopy, classOf[JdkValueWriter], null)
InitializationUtils.setFieldExtractorIfNotSet(cfgCopy, classOf[ConstantFieldExtractor], null) //throw away extractor
- InitializationUtils.setUserProviderIfNotSet(cfgCopy, classOf[HadoopUserProvider], null)
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_FLUSH_MANUAL, "false")
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_ENTRIES, "1000")
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_BYTES, "1mb")
EsSparkSQL.scala

@@ -30,6 +30,7 @@
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_QUERY
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_READ
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_WRITE
import org.elasticsearch.hadoop.cfg.PropertiesSettings
+ import org.elasticsearch.hadoop.mr.security.HadoopUserProvider
import org.elasticsearch.hadoop.util.ObjectUtils
import org.elasticsearch.spark.cfg.SparkSettingsManager
import org.elasticsearch.hadoop.rest.InitializationUtils
@@ -74,6 +75,7 @@ object EsSparkSQL {
esCfg.merge(cfg.asJava)

// Need to discover es version before checking index existence
+ InitializationUtils.setUserProviderIfNotSet(esCfg, classOf[HadoopUserProvider], LOG)
InitializationUtils.discoverClusterInfo(esCfg, LOG)
InitializationUtils.checkIdForOperation(esCfg)
InitializationUtils.checkIndexExistence(esCfg)
DefaultSource.scala

@@ -149,6 +149,7 @@ private[sql] class DefaultSource extends RelationProvider with SchemaRelationPro
s"Cannot continue with [$outputMode].")
}

+ InitializationUtils.setUserProviderIfNotSet(jobSettings, classOf[HadoopUserProvider], LogFactory.getLog(classOf[DefaultSource]))
InitializationUtils.discoverClusterInfo(jobSettings, LogFactory.getLog(classOf[DefaultSource]))
InitializationUtils.checkIdForOperation(jobSettings)
InitializationUtils.checkIndexExistence(jobSettings)
@@ -229,6 +230,7 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @

@transient lazy val cfg = {
val conf = new SparkSettingsManager().load(sqlContext.sparkContext.getConf).merge(parameters.asJava)
+ InitializationUtils.setUserProviderIfNotSet(conf, classOf[HadoopUserProvider], LogFactory.getLog(classOf[ElasticsearchRelation]))
InitializationUtils.discoverClusterInfo(conf, LogFactory.getLog(classOf[ElasticsearchRelation]))
conf
}
@@ -611,10 +613,10 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @

// perform a scan-scroll delete
val cfgCopy = cfg.copy()
+ InitializationUtils.setUserProviderIfNotSet(cfgCopy, classOf[HadoopUserProvider], null)
InitializationUtils.discoverClusterInfo(cfgCopy, Utils.LOGGER)
InitializationUtils.setValueWriterIfNotSet(cfgCopy, classOf[JdkValueWriter], null)
InitializationUtils.setFieldExtractorIfNotSet(cfgCopy, classOf[ConstantFieldExtractor], null) //throw away extractor
- InitializationUtils.setUserProviderIfNotSet(cfgCopy, classOf[HadoopUserProvider], null)
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_FLUSH_MANUAL, "false")
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_ENTRIES, "1000")
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_BYTES, "1mb")
EsSparkSQL.scala

@@ -28,6 +28,7 @@
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_QUERY
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_READ
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_WRITE
import org.elasticsearch.hadoop.cfg.PropertiesSettings
+ import org.elasticsearch.hadoop.mr.security.HadoopUserProvider
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.util.ObjectUtils
import org.elasticsearch.spark.cfg.SparkSettingsManager
@@ -94,6 +95,7 @@ object EsSparkSQL {
esCfg.merge(cfg.asJava)

// Need to discover ES Version before checking index existence
+ InitializationUtils.setUserProviderIfNotSet(esCfg, classOf[HadoopUserProvider], LOG)
InitializationUtils.discoverClusterInfo(esCfg, LOG)
InitializationUtils.checkIdForOperation(esCfg)
InitializationUtils.checkIndexExistence(esCfg)
DefaultSource.scala

@@ -149,6 +149,7 @@ private[sql] class DefaultSource extends RelationProvider with SchemaRelationPro
s"Cannot continue with [$outputMode].")
}

+ InitializationUtils.setUserProviderIfNotSet(jobSettings, classOf[HadoopUserProvider], LogFactory.getLog(classOf[DefaultSource]))
InitializationUtils.discoverClusterInfo(jobSettings, LogFactory.getLog(classOf[DefaultSource]))
InitializationUtils.checkIdForOperation(jobSettings)
InitializationUtils.checkIndexExistence(jobSettings)
@@ -229,6 +230,7 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @

@transient lazy val cfg = {
val conf = new SparkSettingsManager().load(sqlContext.sparkContext.getConf).merge(parameters.asJava)
+ InitializationUtils.setUserProviderIfNotSet(conf, classOf[HadoopUserProvider], LogFactory.getLog(classOf[ElasticsearchRelation]))
InitializationUtils.discoverClusterInfo(conf, LogFactory.getLog(classOf[ElasticsearchRelation]))
conf
}
@@ -611,10 +613,10 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @

// perform a scan-scroll delete
val cfgCopy = cfg.copy()
+ InitializationUtils.setUserProviderIfNotSet(cfgCopy, classOf[HadoopUserProvider], null)
InitializationUtils.discoverClusterInfo(cfgCopy, Utils.LOGGER)
InitializationUtils.setValueWriterIfNotSet(cfgCopy, classOf[JdkValueWriter], null)
InitializationUtils.setFieldExtractorIfNotSet(cfgCopy, classOf[ConstantFieldExtractor], null) //throw away extractor
- InitializationUtils.setUserProviderIfNotSet(cfgCopy, classOf[HadoopUserProvider], null)
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_FLUSH_MANUAL, "false")
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_ENTRIES, "1000")
cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_BYTES, "1mb")
EsSparkSQL.scala

@@ -28,6 +28,7 @@
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_QUERY
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_READ
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_RESOURCE_WRITE
import org.elasticsearch.hadoop.cfg.PropertiesSettings
+ import org.elasticsearch.hadoop.mr.security.HadoopUserProvider
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.util.ObjectUtils
import org.elasticsearch.spark.cfg.SparkSettingsManager
@@ -94,6 +95,7 @@ object EsSparkSQL {
esCfg.merge(cfg.asJava)

// Need to discover ES Version before checking index existence
+ InitializationUtils.setUserProviderIfNotSet(esCfg, classOf[HadoopUserProvider], LOG)
InitializationUtils.discoverClusterInfo(esCfg, LOG)
InitializationUtils.checkIdForOperation(esCfg)
InitializationUtils.checkIndexExistence(esCfg)