public class InMemoryColumnarTableScan
extends org.apache.spark.sql.execution.SparkPlan
implements org.apache.spark.sql.execution.LeafNode, scala.Product, scala.Serializable
| Constructor and Description |
|---|
| `InMemoryColumnarTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> predicates, InMemoryRelation relation)` |
| Modifier and Type | Method and Description |
|---|---|
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>` | `attributes()` |
| `scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression>` | `buildFilter()` |
| `RDD<Row>` | `execute()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>` | `output()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>` | `partitionFilters()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>` | `predicates()` |
| `Accumulator<Object>` | `readBatches()` |
| `Accumulator<Object>` | `readPartitions()` |
| `InMemoryRelation` | `relation()` |
Methods inherited from class org.apache.spark.sql.execution.SparkPlan:
codegenEnabled, executeCollect, executeTake, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, makeCopy, newMutableProjection, newOrdering, newPredicate, newProjection, org$apache$spark$Logging$$log__$eq, org$apache$spark$Logging$$log_, outputPartitioning, requiredChildDistribution, sparkContext, sqlContext

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan:
expressions, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, references, schema, schemaString, simpleString, statePrefix, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode:
apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, foreachUp, generateTreeString, getNodeNumbered, map, mapChildren, nodeName, numberedTreeString, origin, otherCopyArgs, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren

Methods inherited from interface scala.Product:
productArity, productElement, productIterator, productPrefix

Methods inherited from interface org.apache.spark.Logging:
initializeIfNecessary, initializeLogging, log_
public InMemoryColumnarTableScan(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> predicates, InMemoryRelation relation)
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> attributes()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> predicates()
public InMemoryRelation relation()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides: `output` in class `org.apache.spark.sql.catalyst.plans.QueryPlan<org.apache.spark.sql.execution.SparkPlan>`
public scala.PartialFunction<org.apache.spark.sql.catalyst.expressions.Expression,org.apache.spark.sql.catalyst.expressions.Expression> buildFilter()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> partitionFilters()
public Accumulator<Object> readPartitions()
public Accumulator<Object> readBatches()