case class InsertIntoParquetTable(relation: ParquetRelation, child: SparkPlan, overwrite: Boolean = false)(sc: SparkContext) extends SparkPlan with UnaryNode with SparkHadoopMapReduceUtil with Product with Serializable
Operator that acts as a sink for queries on RDDs and can be used to
store the output inside a directory of Parquet files.
case class ParquetTableScan(output: Seq[Attribute], relation: ParquetRelation, columnPruningPred: Option[Expression])(sc: SparkContext) extends SparkPlan with LeafNode with Product with Serializable