public class HiveTableScan extends SparkPlan implements scala.Product, scala.Serializable
| Constructor and Description |
|---|
HiveTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes,
org.apache.spark.sql.hive.MetastoreRelation relation,
scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> partitionPruningPred,
HiveContext context) |
| Modifier and Type | Method and Description |
|---|---|
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
attributes() |
HiveContext |
context() |
RDD<org.apache.spark.sql.catalyst.expressions.Row> |
execute()
Runs this query returning the result as an RDD.
|
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
output() |
scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> |
partitionPruningPred() |
org.apache.spark.sql.hive.MetastoreRelation |
relation() |
Methods inherited from class SparkPlan: codegenEnabled, executeCollect, makeCopy, outputPartitioning, requiredChildDistribution
Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan: expressions, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, schema, schemaString, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp
Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode: apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, generateTreeString, getNodeNumbered, id, map, mapChildren, nextId, nodeName, numberedTreeString, otherCopyArgs, sameInstance, simpleString, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren
Methods inherited from interface scala.Product: productArity, productElement, productIterator, productPrefix
Methods inherited from interface org.apache.spark.Logging: initialized, initializeIfNecessary, initializeLogging, initLock, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning
public HiveTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, org.apache.spark.sql.hive.MetastoreRelation relation, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> partitionPruningPred, HiveContext context)
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes()
public org.apache.spark.sql.hive.MetastoreRelation relation()
public scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> partitionPruningPred()
public HiveContext context()
public RDD<org.apache.spark.sql.catalyst.expressions.Row> execute()
Specified by: execute in class SparkPlan
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides: output in class org.apache.spark.sql.catalyst.plans.QueryPlan&lt;SparkPlan&gt;