public class InsertIntoHiveTable
extends org.apache.spark.sql.execution.SparkPlan
implements org.apache.spark.sql.execution.UnaryExecNode, scala.Product, scala.Serializable
| Constructor and Description |
| --- |
InsertIntoHiveTable(org.apache.spark.sql.hive.MetastoreRelation table,
scala.collection.immutable.Map<String,scala.Option<String>> partition,
org.apache.spark.sql.execution.SparkPlan child,
boolean overwrite,
boolean ifNotExists) |
| Modifier and Type | Method and Description |
| --- | --- |
static org.apache.spark.sql.catalyst.expressions.AttributeSeq |
allAttributes() |
static BaseType |
apply(int number) |
static String |
argString() |
static String |
asCode() |
abstract static boolean |
canEqual(Object that) |
org.apache.spark.sql.execution.SparkPlan |
child() |
static scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan> |
children() |
static <B> scala.collection.Seq<B> |
collect(scala.PartialFunction<BaseType,B> pf) |
static <B> scala.Option<B> |
collectFirst(scala.PartialFunction<BaseType,B> pf) |
static org.apache.spark.sql.catalyst.expressions.ExpressionSet |
constraints() |
static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.trees.TreeNode<?>> |
containsChild() |
abstract static boolean |
equals(Object that) |
static RDD<org.apache.spark.sql.catalyst.InternalRow> |
execute() |
static <T> Broadcast<T> |
executeBroadcast() |
org.apache.spark.sql.catalyst.InternalRow[] |
executeCollect() |
static Row[] |
executeCollectPublic() |
static org.apache.spark.sql.catalyst.InternalRow[] |
executeTake(int n) |
static scala.collection.Iterator<org.apache.spark.sql.catalyst.InternalRow> |
executeToIterator() |
static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> |
expressions() |
static boolean |
fastEquals(org.apache.spark.sql.catalyst.trees.TreeNode<?> other) |
static scala.Option<BaseType> |
find(scala.Function1<BaseType,Object> f) |
static <A> scala.collection.Seq<A> |
flatMap(scala.Function1<BaseType,scala.collection.TraversableOnce<A>> f) |
static void |
foreach(scala.Function1<BaseType,scala.runtime.BoxedUnit> f) |
static void |
foreachUp(scala.Function1<BaseType,scala.runtime.BoxedUnit> f) |
static scala.collection.mutable.StringBuilder |
generateTreeString(int depth,
scala.collection.Seq<Object> lastChildren,
scala.collection.mutable.StringBuilder builder,
boolean verbose,
String prefix) |
static String |
generateTreeString$default$5() |
org.apache.hadoop.fs.Path |
getExternalTmpPath(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration hadoopConf) |
org.apache.hadoop.fs.Path |
getExtTmpPathRelTo(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration hadoopConf) |
static int |
hashCode() |
boolean |
ifNotExists() |
void |
initializeLogging(boolean isInterpreter) |
static org.apache.spark.sql.catalyst.expressions.AttributeSet |
inputSet() |
org.slf4j.Logger |
log_() |
static org.apache.spark.sql.execution.metric.SQLMetric |
longMetric(String name) |
static org.apache.spark.sql.execution.SparkPlan |
makeCopy(Object[] newArgs) |
static <A> scala.collection.Seq<A> |
map(scala.Function1<BaseType,A> f) |
static BaseType |
mapChildren(scala.Function1<BaseType,BaseType> f) |
static scala.collection.immutable.Map<String,String> |
metadata() |
static scala.collection.immutable.Map<String,org.apache.spark.sql.execution.metric.SQLMetric> |
metrics() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet |
missingInput() |
static String |
nodeName() |
static String |
numberedTreeString() |
static org.apache.spark.sql.catalyst.trees.Origin |
origin() |
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
output() |
static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> |
outputOrdering() |
static org.apache.spark.sql.catalyst.plans.physical.Partitioning |
outputPartitioning() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet |
outputSet() |
boolean |
overwrite() |
scala.collection.immutable.Map<String,scala.Option<String>> |
partition() |
static void |
prepare() |
static String |
prettyJson() |
static void |
printSchema() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet |
producedAttributes() |
abstract static int |
productArity() |
abstract static Object |
productElement(int n) |
static scala.collection.Iterator<Object> |
productIterator() |
static String |
productPrefix() |
static org.apache.spark.sql.catalyst.expressions.AttributeSet |
references() |
static scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution> |
requiredChildDistribution() |
static scala.collection.Seq<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>> |
requiredChildOrdering() |
static void |
resetMetrics() |
static boolean |
sameResult(PlanType plan) |
static StructType |
schema() |
static String |
schemaString() |
static String |
simpleString() |
static SQLContext |
sqlContext() |
String |
stagingDir() |
static boolean |
subexpressionEliminationEnabled() |
static scala.collection.Seq<PlanType> |
subqueries() |
org.apache.spark.sql.hive.MetastoreRelation |
table() |
static String |
toJSON() |
static String |
toString() |
static BaseType |
transform(scala.PartialFunction<BaseType,BaseType> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> |
transformAllExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static BaseType |
transformDown(scala.PartialFunction<BaseType,BaseType> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> |
transformExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> |
transformExpressionsDown(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> |
transformExpressionsUp(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule) |
static BaseType |
transformUp(scala.PartialFunction<BaseType,BaseType> rule) |
static String |
treeString() |
static String |
treeString(boolean verbose) |
static String |
verboseString() |
static BaseType |
withNewChildren(scala.collection.Seq<BaseType> newChildren) |
doExecuteBroadcast, doPrepare, execute, executeBroadcast, executeCollectPublic, executeQuery, executeTake, executeToIterator, initializeLogIfNecessary, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, longMetric, makeCopy, metadata, metrics, newMutableProjection, newMutableProjection$default$3, newNaturalAscendingOrdering, newOrdering, newPredicate, org$apache$spark$internal$Logging$$log__$eq, org$apache$spark$internal$Logging$$log_, org$apache$spark$sql$execution$SparkPlan$$decodeUnsafeRows, org$apache$spark$sql$execution$SparkPlan$$subqueryResults, outputOrdering, outputPartitioning, prepare, prepareSubqueries, requiredChildDistribution, requiredChildOrdering, resetMetrics, sparkContext, sqlContext, subexpressionEliminationEnabled, waitForSubqueries
allAttributes, canonicalized, cleanArgs, constraints, expressions, getRelevantConstraints, innerChildren, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$cleanArg$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$2, org$apache$spark$sql$catalyst$plans$QueryPlan$$scanNullIntolerantExpr, org$apache$spark$sql$catalyst$plans$QueryPlan$$seqToExpressions$1, outputSet, printSchema, producedAttributes, references, sameResult, schema, schemaString, simpleString, statePrefix, subqueries, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp, validConstraints, verboseString
apply, argString, asCode, children, collect, collectFirst, containsChild, fastEquals, find, flatMap, foreach, foreachUp, fromJSON, generateTreeString, generateTreeString$default$5, getNodeNumbered, hashCode, jsonFields, map, mapChildren, mapProductIterator, nodeName, numberedTreeString, org$apache$spark$sql$catalyst$trees$TreeNode$$allChildren, org$apache$spark$sql$catalyst$trees$TreeNode$$collectJsonValue$1, org$apache$spark$sql$catalyst$trees$TreeNode$$parseToJson, origin, otherCopyArgs, prettyJson, productIterator, productPrefix, stringArgs, toJSON, toString, transform, transformChildren, transformDown, transformUp, treeString, treeString, withNewChildren
children, outputPartitioning
public InsertIntoHiveTable(org.apache.spark.sql.hive.MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite, boolean ifNotExists)
public abstract static boolean canEqual(Object that)
public abstract static boolean equals(Object that)
public abstract static Object productElement(int n)
public abstract static int productArity()
public static scala.collection.Iterator<Object> productIterator()
public static String productPrefix()
public static org.apache.spark.sql.catalyst.trees.Origin origin()
public static scala.collection.immutable.Set<org.apache.spark.sql.catalyst.trees.TreeNode<?>> containsChild()
public static int hashCode()
public static boolean fastEquals(org.apache.spark.sql.catalyst.trees.TreeNode<?> other)
public static scala.Option<BaseType> find(scala.Function1<BaseType,Object> f)
public static void foreach(scala.Function1<BaseType,scala.runtime.BoxedUnit> f)
public static void foreachUp(scala.Function1<BaseType,scala.runtime.BoxedUnit> f)
public static <A> scala.collection.Seq<A> map(scala.Function1<BaseType,A> f)
public static <A> scala.collection.Seq<A> flatMap(scala.Function1<BaseType,scala.collection.TraversableOnce<A>> f)
public static <B> scala.collection.Seq<B> collect(scala.PartialFunction<BaseType,B> pf)
public static <B> scala.Option<B> collectFirst(scala.PartialFunction<BaseType,B> pf)
public static BaseType mapChildren(scala.Function1<BaseType,BaseType> f)
public static BaseType withNewChildren(scala.collection.Seq<BaseType> newChildren)
public static BaseType transform(scala.PartialFunction<BaseType,BaseType> rule)
public static BaseType transformDown(scala.PartialFunction<BaseType,BaseType> rule)
public static BaseType transformUp(scala.PartialFunction<BaseType,BaseType> rule)
public static String nodeName()
public static String argString()
public static String toString()
public static String treeString()
public static String treeString(boolean verbose)
public static String numberedTreeString()
public static BaseType apply(int number)
public static scala.collection.mutable.StringBuilder generateTreeString(int depth, scala.collection.Seq<Object> lastChildren, scala.collection.mutable.StringBuilder builder, boolean verbose, String prefix)
public static String asCode()
public static String toJSON()
public static String prettyJson()
public static String generateTreeString$default$5()
public static org.apache.spark.sql.catalyst.expressions.ExpressionSet constraints()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet outputSet()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet references()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet inputSet()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet producedAttributes()
public static org.apache.spark.sql.catalyst.expressions.AttributeSet missingInput()
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformExpressionsDown(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformExpressionsUp(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static org.apache.spark.sql.catalyst.plans.QueryPlan<PlanType> transformAllExpressions(scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> rule)
public static final scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> expressions()
public static StructType schema()
public static String schemaString()
public static void printSchema()
public static String simpleString()
public static String verboseString()
public static scala.collection.Seq<PlanType> subqueries()
public static boolean sameResult(PlanType plan)
public static org.apache.spark.sql.catalyst.expressions.AttributeSeq allAttributes()
public static final SQLContext sqlContext()
public static boolean subexpressionEliminationEnabled()
public static org.apache.spark.sql.execution.SparkPlan makeCopy(Object[] newArgs)
public static scala.collection.immutable.Map<String,String> metadata()
public static scala.collection.immutable.Map<String,org.apache.spark.sql.execution.metric.SQLMetric> metrics()
public static void resetMetrics()
public static org.apache.spark.sql.execution.metric.SQLMetric longMetric(String name)
public static scala.collection.Seq<org.apache.spark.sql.catalyst.plans.physical.Distribution> requiredChildDistribution()
public static scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder> outputOrdering()
public static scala.collection.Seq<scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.SortOrder>> requiredChildOrdering()
public static final RDD<org.apache.spark.sql.catalyst.InternalRow> execute()
public static final <T> Broadcast<T> executeBroadcast()
public static final void prepare()
public static scala.collection.Iterator<org.apache.spark.sql.catalyst.InternalRow> executeToIterator()
public static Row[] executeCollectPublic()
public static org.apache.spark.sql.catalyst.InternalRow[] executeTake(int n)
public static scala.collection.Seq<org.apache.spark.sql.execution.SparkPlan> children()
public static org.apache.spark.sql.catalyst.plans.physical.Partitioning outputPartitioning()
public org.apache.spark.sql.hive.MetastoreRelation table()
public scala.collection.immutable.Map<String,scala.Option<String>> partition()
public org.apache.spark.sql.execution.SparkPlan child()
Specified by: `child` in interface `org.apache.spark.sql.execution.UnaryExecNode`
public boolean overwrite()
public boolean ifNotExists()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides: `output` in class `org.apache.spark.sql.catalyst.plans.QueryPlan<org.apache.spark.sql.execution.SparkPlan>`
public String stagingDir()
public org.apache.hadoop.fs.Path getExternalTmpPath(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration hadoopConf)
public org.apache.hadoop.fs.Path getExtTmpPathRelTo(org.apache.hadoop.fs.Path path, org.apache.hadoop.conf.Configuration hadoopConf)
public org.apache.spark.sql.catalyst.InternalRow[] executeCollect()
Overrides: `executeCollect` in class `org.apache.spark.sql.execution.SparkPlan`
public org.slf4j.Logger log_()
public void initializeLogging(boolean isInterpreter)