public class RRDD<T> extends BaseRRDD<T,byte[]>
| Constructor and Description |
|---|
| RRDD(RDD<T> parent, byte[] func, java.lang.String deserializer, java.lang.String serializer, byte[] packageNames, java.lang.Object[] broadcastVars, scala.reflect.ClassTag<T> evidence$4) |
| Modifier and Type | Method and Description |
|---|---|
| JavaRDD<byte[]> | asJavaRDD() |
| static JavaRDD<byte[]> | createRDDFromArray(JavaSparkContext jsc, byte[][] arr) - Create an RRDD given a sequence of byte arrays. |
| static org.apache.spark.api.r.BufferedStreamThread | createRWorker(int port) - ProcessBuilder used to launch worker R processes. |
| static JavaSparkContext | createSparkContext(java.lang.String master, java.lang.String appName, java.lang.String sparkHome, java.lang.String[] jars, java.util.Map<java.lang.Object,java.lang.Object> sparkEnvirMap, java.util.Map<java.lang.Object,java.lang.Object> sparkExecutorEnvMap) |
| protected byte[] | readData(int length) |
Methods inherited from class org.apache.spark.api.r.BaseRRDD:
compute, dataStream, getPartitions, read

Methods inherited from class org.apache.spark.rdd.RDD:
aggregate, cache, cartesian, checkpoint, checkpointData, clearDependencies, coalesce, collect, collect, context, count, countApprox, countApproxDistinct, countApproxDistinct, countByValue, countByValueApprox, creationSite, dependencies, distinct, distinct, doubleRDDToDoubleRDDFunctions, filter, filterWith, first, firstParent, flatMap, flatMapWith, fold, foreach, foreachPartition, foreachWith, getCheckpointFile, getDependencies, getNumPartitions, getPreferredLocations, getStorageLevel, glom, groupBy, groupBy, groupBy, id, intersection, intersection, intersection, isCheckpointed, isEmpty, iterator, keyBy, localCheckpoint, map, mapPartitions, mapPartitionsWithContext, mapPartitionsWithIndex, mapPartitionsWithSplit, mapWith, max, min, name, numericRDDToDoubleRDDFunctions, parent, partitioner, partitions, persist, persist, pipe, pipe, pipe, preferredLocations, randomSplit, rddToAsyncRDDActions, rddToOrderedRDDFunctions, rddToPairRDDFunctions, rddToSequenceFileRDDFunctions, reduce, repartition, sample, saveAsObjectFile, saveAsTextFile, saveAsTextFile, scope, setName, sortBy, sparkContext, subtract, subtract, subtract, take, takeOrdered, takeSample, toArray, toDebugString, toJavaRDD, toLocalIterator, top, toString, treeAggregate, treeReduce, union, unpersist, zip, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipWithIndex, zipWithUniqueId
Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

Methods inherited from interface org.apache.spark.Logging:
initializeIfNecessary, initializeLogging, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning
public static JavaSparkContext createSparkContext(java.lang.String master, java.lang.String appName, java.lang.String sparkHome, java.lang.String[] jars, java.util.Map<java.lang.Object,java.lang.Object> sparkEnvirMap, java.util.Map<java.lang.Object,java.lang.Object> sparkExecutorEnvMap)
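For illustration, a minimal Scala sketch of calling this entry point directly. In normal use the SparkR backend invokes it on behalf of an R session; the master URL, app name, configuration value, and empty jar list below are placeholder assumptions, not values this API prescribes.

```scala
import java.util.{HashMap => JHashMap}
import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.api.r.RRDD

// The environment maps are typed Object -> Object because their contents
// arrive from R as generic objects over the SparkR backend.
val sparkEnvirMap = new JHashMap[Object, Object]()
sparkEnvirMap.put("spark.executor.memory", "1g") // placeholder setting

val sparkExecutorEnvMap = new JHashMap[Object, Object]()

// "local[2]" and "rrdd-sketch" are assumptions for this sketch.
val jsc: JavaSparkContext = RRDD.createSparkContext(
  "local[2]",
  "rrdd-sketch",
  sys.env.getOrElse("SPARK_HOME", ""),
  Array.empty[String],
  sparkEnvirMap,
  sparkExecutorEnvMap)

jsc.stop()
```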
public static org.apache.spark.api.r.BufferedStreamThread createRWorker(int port)
Parameters:
port - (undocumented)
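A hedged sketch of how a caller might use this: the worker R process connects back to a port the JVM is listening on, and the returned thread buffers the process output so that startup errors can be surfaced. This assumes a local R installation with the SparkR package available; the ServerSocket usage is illustrative, not prescribed by this API.

```scala
import java.net.ServerSocket
import org.apache.spark.api.r.RRDD

// Listen on an ephemeral port for the R worker to connect back to.
val serverSocket = new ServerSocket(0)

// Launch the R worker process; the returned BufferedStreamThread drains
// and buffers the process's output stream.
val errThread = RRDD.createRWorker(serverSocket.getLocalPort)
```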
public static JavaRDD<byte[]> createRDDFromArray(JavaSparkContext jsc, byte[][] arr)
Create an RRDD given a sequence of byte arrays. Used to create RRDD when parallelize is called from R.
Parameters:
jsc - (undocumented)
arr - (undocumented)
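A minimal sketch, assuming a local JavaSparkContext: each byte array in arr becomes one element of the resulting RDD. The UTF-8 strings here stand in for the serialized R objects that SparkR's parallelize would actually supply.

```scala
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.api.r.RRDD

val jsc = new JavaSparkContext("local[2]", "rrdd-from-array") // placeholder master/app name

// In real use these bytes are R objects serialized on the R side; plain
// UTF-8 strings are used only to keep the sketch self-contained.
val arr: Array[Array[Byte]] = Array(
  "first".getBytes("UTF-8"),
  "second".getBytes("UTF-8"))

val rdd: JavaRDD[Array[Byte]] = RRDD.createRDDFromArray(jsc, arr)
assert(rdd.count() == 2) // one RDD element per input byte array

jsc.stop()
```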
public JavaRDD<byte[]> asJavaRDD()
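A sketch of the typical hand-off, assuming rrdd is an existing RRDD[T] instance (in practice one built by the SparkR backend rather than constructed by hand):

```scala
import org.apache.spark.api.java.JavaRDD

// Wrap the RRDD for consumption through the Java API; each element is the
// byte-serialized output produced by the R worker for that record.
val javaRdd: JavaRDD[Array[Byte]] = rrdd.asJavaRDD()
```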