public class RRDD<T> extends BaseRRDD<T,byte[]>
Constructor and Description |
---|
RRDD(RDD<T> parent,
byte[] func,
String deserializer,
String serializer,
byte[] packageNames,
String rLibDir,
Object[] broadcastVars,
scala.reflect.ClassTag<T> evidence$4) |
Modifier and Type | Method and Description |
---|---|
JavaRDD<byte[]> |
asJavaRDD() |
static JavaRDD<byte[]> |
createRDDFromArray(JavaSparkContext jsc,
byte[][] arr)
Create an RRDD given a sequence of byte arrays.
|
static org.apache.spark.api.r.BufferedStreamThread |
createRWorker(String rLibDir,
int port)
ProcessBuilder used to launch worker R processes.
|
static JavaSparkContext |
createSparkContext(String master,
String appName,
String sparkHome,
String[] jars,
java.util.Map<Object,Object> sparkEnvirMap,
java.util.Map<Object,Object> sparkExecutorEnvMap) |
compute, getPartitions
aggregate, cache, cartesian, checkpoint, checkpointData, coalesce, collect, collect, context, count, countApprox, countApproxDistinct, countApproxDistinct, countByValue, countByValueApprox, creationSite, dependencies, distinct, distinct, doubleRDDToDoubleRDDFunctions, filter, filterWith, first, flatMap, flatMapWith, fold, foreach, foreachPartition, foreachWith, getCheckpointFile, getStorageLevel, glom, groupBy, groupBy, groupBy, id, intersection, intersection, intersection, isCheckpointed, isEmpty, iterator, keyBy, map, mapPartitions, mapPartitionsWithContext, mapPartitionsWithIndex, mapPartitionsWithSplit, mapWith, max, min, name, numericRDDToDoubleRDDFunctions, partitioner, partitions, persist, persist, pipe, pipe, pipe, preferredLocations, randomSplit, rddToAsyncRDDActions, rddToOrderedRDDFunctions, rddToPairRDDFunctions, rddToSequenceFileRDDFunctions, reduce, repartition, sample, saveAsObjectFile, saveAsTextFile, saveAsTextFile, scope, setName, sortBy, sparkContext, subtract, subtract, subtract, take, takeOrdered, takeSample, toArray, toDebugString, toJavaRDD, toLocalIterator, top, toString, treeAggregate, treeReduce, union, unpersist, zip, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipWithIndex, zipWithUniqueId
initializeIfNecessary, initializeLogging, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning
public static JavaSparkContext createSparkContext(String master, String appName, String sparkHome, String[] jars, java.util.Map<Object,Object> sparkEnvirMap, java.util.Map<Object,Object> sparkExecutorEnvMap)
public static org.apache.spark.api.r.BufferedStreamThread createRWorker(String rLibDir, int port)
rLibDir - (undocumented)
port - (undocumented)
public static JavaRDD<byte[]> createRDDFromArray(JavaSparkContext jsc, byte[][] arr)
Used when parallelize is called from R.
jsc - (undocumented)
arr - (undocumented)
public JavaRDD<byte[]> asJavaRDD()