public class InsertIntoHiveTable extends org.apache.spark.sql.execution.SparkPlan implements org.apache.spark.sql.execution.UnaryNode, HiveInspectors, scala.Product, scala.Serializable
Nested classes/interfaces inherited from interface org.apache.spark.sql.hive.HiveInspectors: HiveInspectors.typeInfoConversions
| Constructor and Description |
|---|
| `InsertIntoHiveTable(MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite)` |
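This operator is normally produced by the Hive query planner rather than constructed by hand: an `INSERT` statement against a metastore table is planned as an `InsertIntoHiveTable` node. A minimal sketch of the usual entry point, assuming a Hive-enabled Spark 1.x build (the table names `target` and `source` are placeholders):

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

object InsertIntoHiveTableExample {
  def main(args: Array[String]): Unit = {
    val sparkContext = new SparkContext(new SparkConf().setAppName("InsertIntoHiveTableExample"))
    val hiveContext = new HiveContext(sparkContext)

    // Placeholder table names; both tables must already exist in the metastore.
    // The planner turns this statement into an InsertIntoHiveTable physical operator,
    // and in this Spark lineage INSERT statements typically execute eagerly.
    hiveContext.sql("INSERT OVERWRITE TABLE target SELECT key, value FROM source")

    sparkContext.stop()
  }
}
```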
| Modifier and Type | Method and Description |
|---|---|
| `org.apache.spark.sql.execution.SparkPlan` | `child()` |
| `RDD<Row>` | `execute()` |
| `Row[]` | `executeCollect()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>` | `output()` |
| `Class<? extends org.apache.hadoop.io.Writable>` | `outputClass()` |
| `boolean` | `overwrite()` |
| `scala.collection.immutable.Map<String,scala.Option<String>>` | `partition()` |
| `void` | `saveAsHiveFile(RDD<Row> rdd, Class<?> valueClass, ShimFileSinkDesc fileSinkConf, SerializableWritable<org.apache.hadoop.mapred.JobConf> conf, SparkHiveWriterContainer writerContainer)` |
| `HiveContext` | `sc()` |
| `MetastoreRelation` | `table()` |
Methods inherited from class org.apache.spark.sql.execution.SparkPlan: codegenEnabled, executeTake, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, makeCopy, newMutableProjection, newOrdering, newPredicate, newProjection, org$apache$spark$Logging$$log__$eq, org$apache$spark$Logging$$log_, outputPartitioning, requiredChildDistribution, sparkContext, sqlContext

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan: expressions, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, references, schema, schemaString, simpleString, statePrefix, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode: apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, foreachUp, generateTreeString, getNodeNumbered, map, mapChildren, nodeName, numberedTreeString, origin, otherCopyArgs, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren

Methods inherited from interface org.apache.spark.sql.hive.HiveInspectors: inspectorToDataType, javaClassToDataType, toInspector, toInspector, unwrap, wrap, wrap, wrap, wrapperFor

Methods inherited from interface scala.Product: productArity, productElement, productIterator, productPrefix

Methods inherited from interface org.apache.spark.Logging: initializeIfNecessary, initializeLogging, log_
public InsertIntoHiveTable(MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite)
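A sketch of constructing the operator directly, assuming a resolved `MetastoreRelation` and an already-planned child are in scope and that these developer-API classes are accessible from your build (they are internal and not stable across releases); the partition column `ds` is a hypothetical name:

```scala
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.hive.MetastoreRelation
import org.apache.spark.sql.hive.execution.InsertIntoHiveTable

def planInsert(relation: MetastoreRelation, child: SparkPlan): InsertIntoHiveTable =
  new InsertIntoHiveTable(
    table = relation,
    // Static partition spec: Some(value) pins a partition column,
    // None leaves that column dynamic. "ds" is a hypothetical column.
    partition = Map("ds" -> Some("2015-01-01")),
    child = child,
    overwrite = false)
```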
public MetastoreRelation table()
public scala.collection.immutable.Map<String,scala.Option<String>> partition()
public org.apache.spark.sql.execution.SparkPlan child()
Specified by: child in interface org.apache.spark.sql.catalyst.trees.UnaryNode<org.apache.spark.sql.execution.SparkPlan>
public boolean overwrite()
public HiveContext sc()
public Class<? extends org.apache.hadoop.io.Writable> outputClass()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Specified by: output in class org.apache.spark.sql.catalyst.plans.QueryPlan<org.apache.spark.sql.execution.SparkPlan>
public void saveAsHiveFile(RDD<Row> rdd, Class<?> valueClass, ShimFileSinkDesc fileSinkConf, SerializableWritable<org.apache.hadoop.mapred.JobConf> conf, SparkHiveWriterContainer writerContainer)
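A sketch of delegating the physical write, with every Hive-specific input supplied by the caller, as the operator's own execution path does internally before committing the output. The import locations for the shim and writer-container classes follow the Spark 1.x source layout and are an assumption (they moved between releases):

```scala
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SerializableWritable
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
// Assumed package for the shim classes; verify against your Spark/Hive build.
import org.apache.spark.sql.hive.{ShimFileSinkDesc, SparkHiveWriterContainer}
import org.apache.spark.sql.hive.execution.InsertIntoHiveTable

// Writes the rows through the Hive OutputFormat configured in fileSinkConf,
// using the supplied writer container to manage per-task writers.
def writeRows(
    op: InsertIntoHiveTable,
    rows: RDD[Row],
    valueClass: Class[_],
    fileSinkConf: ShimFileSinkDesc,
    jobConf: SerializableWritable[JobConf],
    container: SparkHiveWriterContainer): Unit = {
  op.saveAsHiveFile(rows, valueClass, fileSinkConf, jobConf, container)
}
```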
public Row[] executeCollect()
Overrides: executeCollect in class org.apache.spark.sql.execution.SparkPlan
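In this Spark lineage the insert appears to run eagerly as a side effect of executing the operator, and the rows returned to the caller are empty; a minimal sketch of forcing the write from an already-planned operator:

```scala
import org.apache.spark.sql.Row
import org.apache.spark.sql.hive.execution.InsertIntoHiveTable

// Forces the write; the returned array carries no rows for an insert.
def runInsert(plan: InsertIntoHiveTable): Array[Row] =
  plan.executeCollect()
```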