public class SparkHadoopWriter extends Object implements Logging, SparkHadoopMapRedUtil, scala.Serializable
Saves the RDD using a JobConf, which should contain an output key class, an output value class, a filename to write to, etc., exactly like in a Hadoop MapReduce job.
| Constructor and Description |
|---|
| `SparkHadoopWriter(org.apache.hadoop.mapred.JobConf jobConf)` |
| Modifier and Type | Method and Description |
|---|---|
| `void` | `close()` |
| `void` | `commit()` |
| `void` | `commitJob()` |
| `static org.apache.hadoop.mapred.JobID` | `createJobID(java.util.Date time, int id)` |
| `static org.apache.hadoop.fs.Path` | `createPathFromString(String path, org.apache.hadoop.mapred.JobConf conf)` |
| `void` | `open()` |
| `void` | `preSetup()` |
| `void` | `setup(int jobid, int splitid, int attemptid)` |
| `void` | `write(Object key, Object value)` |
Methods inherited from class java.lang.Object: equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface Logging: initializeIfNecessary, initializeLogging, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning

Methods inherited from interface SparkHadoopMapRedUtil: firstAvailableClass, newJobContext, newTaskAttemptContext, newTaskAttemptID
public SparkHadoopWriter(org.apache.hadoop.mapred.JobConf jobConf)
public static org.apache.hadoop.mapred.JobID createJobID(java.util.Date time, int id)
public static org.apache.hadoop.fs.Path createPathFromString(String path, org.apache.hadoop.mapred.JobConf conf)
public void preSetup()
public void setup(int jobid, int splitid, int attemptid)
public void open()
public void write(Object key, Object value)
public void close()
public void commit()
public void commitJob()