public class HiveTableScan extends SparkPlan implements scala.Product, scala.Serializable
| Constructor and Description |
|---|
HiveTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes,
org.apache.spark.sql.hive.MetastoreRelation relation,
scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> partitionPruningPred,
HiveContext sc) |
| Modifier and Type | Method and Description |
|---|---|
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
attributes() |
RDD<org.apache.spark.sql.catalyst.expressions.Row> |
execute()
Runs this query returning the result as an RDD.
|
org.apache.spark.sql.hive.HadoopTableReader |
hadoopReader() |
RDD<?> |
inputRdd() |
org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector |
objectInspector()
The hive object inspector for this table, which can be used to extract values from the
serialized row representation.
|
scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> |
output() |
scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> |
partitionPruningPred() |
org.apache.spark.sql.hive.MetastoreRelation |
relation() |
HiveContext |
sc() |
executeCollect, outputPartitioning, requiredChildDistribution
expressions, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp
apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, generateTreeString, getNodeNumbered, id, makeCopy, map, mapChildren, nextId, nodeName, numberedTreeString, otherCopyArgs, sameInstance, simpleString, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren
public HiveTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, org.apache.spark.sql.hive.MetastoreRelation relation, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> partitionPruningPred, HiveContext sc)
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes()
public org.apache.spark.sql.hive.MetastoreRelation relation()
public scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> partitionPruningPred()
public HiveContext sc()
public org.apache.spark.sql.hive.HadoopTableReader hadoopReader()
public org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector objectInspector()
public RDD<?> inputRdd()
public RDD<org.apache.spark.sql.catalyst.expressions.Row> execute()
SparkPlan
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides: output in class org.apache.spark.sql.catalyst.plans.QueryPlan<SparkPlan>