[[ {SparkR}          R Documentation

Subset

Description

Return subsets of a SparkDataFrame according to given conditions.

Usage

## S4 method for signature 'SparkDataFrame,numericOrcharacter'
x[[i]]

## S4 replacement method for signature 'SparkDataFrame,numericOrcharacter'
x[[i]] <- value

## S4 method for signature 'SparkDataFrame'
x[i, j, ..., drop = F]

## S4 method for signature 'SparkDataFrame'
subset(x, subset, select, drop = F, ...)

subset(x, ...)
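For orientation, one call of each documented form is sketched below; it assumes a SparkDataFrame df with columns name and age (illustrative only, see the setup sketch after the Examples).

  df[["age"]]                                # extract a single Column by name (or by position, e.g. df[[2]])
  df[["age"]] <- df$age + 1                  # replace a Column
  df[df$age > 20, c("name", "age")]          # [ : filter rows and select columns
  subset(df, df$age > 20, select = "name")   # subset(): the same, with named arguments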

Arguments

x

a SparkDataFrame.

i,subset

(Optional) a logical expression used to filter rows. For the extract operator [[ and the replacement operator [[<-, the index of a single column, given as a numeric position or a column name.

value

a Column, an atomic vector of length 1 used as a literal value, or NULL. If NULL, the specified Column is dropped.

j,select

an expression for a single Column, or a list of columns, to select from the SparkDataFrame.

...

currently not used.

drop

if TRUE, a Column is returned when the resulting dataset has only one column; otherwise a SparkDataFrame is always returned (illustrated in the sketch below).
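The interplay of drop with single-column selection, and the behavior of value for the replacement operator, can be shown with a brief sketch. It assumes an active Spark session and a SparkDataFrame df with columns name and age; the data and names are illustrative only (see the setup sketch after the Examples).

  df[, "age", drop = TRUE]    # one selected column with drop = TRUE: a Column is returned
  df[, "age", drop = FALSE]   # the same selection with drop = FALSE: a one-column SparkDataFrame
  df[["age"]] <- df$age * 2   # [[<- with a Column value replaces the column
  df[["age"]] <- NULL         # [[<- with NULL drops the column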

Value

A new SparkDataFrame containing only the rows that meet the condition, with the selected columns.
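The returned value is a new SparkDataFrame; the input x is not modified. A brief sketch, again assuming the illustrative df described above:

  adults <- subset(df, df$age > 20, select = c("name", "age"))
  nrow(adults)   # only the rows satisfying the condition
  nrow(df)       # the original SparkDataFrame is unchanged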

Note

[[ since 1.4.0

[[<- since 2.1.1

[ since 1.4.0

subset since 1.5.0

See Also

withColumn

Other SparkDataFrame functions: $, $<-, select; SparkDataFrame-class; agg, summarize; arrange, orderBy; as.data.frame; attach; cache; coalesce; collect; colnames, colnames<-, columns, names, names<-; coltypes, coltypes<-; count, nrow; createOrReplaceTempView; crossJoin; dapplyCollect; dapply; describe, summary; dim; distinct, unique; dropDuplicates; dropna, fillna, na.omit; drop; dtypes; except; explain; filter, where; first; gapplyCollect; gapply; getNumPartitions; groupBy, group_by; head; histogram; insertInto; intersect; isLocal; join; limit; merge; mutate, transform; ncol; persist; printSchema; randomSplit; rbind; registerTempTable; rename, withColumnRenamed; repartition; sample, sample_frac; saveAsParquetFile, write.parquet; saveAsTable; saveDF, write.df; schema; selectExpr; showDF; show; storageLevel; str; take; union, unionAll; unpersist; withColumn; with; write.jdbc; write.json; write.orc; write.text

Other subsetting functions: $, $<-, select; filter, where

Examples

## Not run:
  # Columns can be selected using [[ and [
  df[[2]] == df[["age"]]
  df[, 2] == df[, "age"]
  df[, c("name", "age")]
  # Or to filter rows
  df[df$age > 20, ]
  # SparkDataFrame can be subset on both rows and Columns
  df[df$name == "Smith", c(1, 2)]
  df[df$age %in% c(19, 30), 1:2]
  subset(df, df$age %in% c(19, 30), 1:2)
  subset(df, df$age %in% c(19), select = c(1, 2))
  subset(df, select = c(1, 2))
  # Columns can be selected and set
  df[["age"]] <- 23
  df[[1]] <- df$age
  df[[2]] <- NULL # drop column
## End(Not run)
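The examples above assume an existing SparkDataFrame named df. A minimal, self-contained setup sketch follows; the session settings and sample data are illustrative assumptions, not part of the documented API.

  library(SparkR)
  sparkR.session(master = "local[*]", appName = "subset-examples")
  df <- createDataFrame(data.frame(name = c("Smith", "Jones", "Lee"),
                                   age = c(19, 30, 45)))
  head(df[df$age %in% c(19, 30), c("name", "age")])
  sparkR.session.stop()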

[Package SparkR version 2.1.1 Index]