class HadoopWriter extends Logging with HadoopMapRedUtil with Serializable
Instance Constructors
- new HadoopWriter(jobConf: JobConf)
Value Members
- final def !=(arg0: AnyRef): Boolean
- final def !=(arg0: Any): Boolean
- final def ##(): Int
- final def ==(arg0: AnyRef): Boolean
- final def ==(arg0: Any): Boolean
- final def asInstanceOf[T0]: T0
- def cleanup(): Unit
- def clone(): AnyRef
- def close(): Unit
- def commit(): Unit
- def commitJob(): Unit
- final def eq(arg0: AnyRef): Boolean
- def equals(arg0: Any): Boolean
- def finalize(): Unit
- final def getClass(): java.lang.Class[_]
- def hashCode(): Int
- def initLogging(): Unit
- final def isInstanceOf[T0]: Boolean
- def log: Logger
- def logDebug(msg: ⇒ String, throwable: Throwable): Unit
- def logDebug(msg: ⇒ String): Unit
- def logError(msg: ⇒ String, throwable: Throwable): Unit
- def logError(msg: ⇒ String): Unit
- def logInfo(msg: ⇒ String, throwable: Throwable): Unit
- def logInfo(msg: ⇒ String): Unit
- def logTrace(msg: ⇒ String, throwable: Throwable): Unit
- def logTrace(msg: ⇒ String): Unit
- def logWarning(msg: ⇒ String, throwable: Throwable): Unit
- def logWarning(msg: ⇒ String): Unit
- final def ne(arg0: AnyRef): Boolean
- def newJobContext(conf: JobConf, jobId: JobID): JobContext
- def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext
- final def notify(): Unit
- final def notifyAll(): Unit
- def open(): Unit
- def preSetup(): Unit
- def setup(jobid: Int, splitid: Int, attemptid: Int): Unit
- final def synchronized[T0](arg0: ⇒ T0): T0
- def toString(): String
- final def wait(): Unit
- final def wait(arg0: Long, arg1: Int): Unit
- final def wait(arg0: Long): Unit
- def write(key: AnyRef, value: AnyRef): Unit
Inherited from HadoopMapRedUtil
Value Members
- def newJobContext(conf: JobConf, jobId: JobID): JobContext
- def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext
Inherited from Logging
Value Members
- def initLogging(): Unit
- def log: Logger
- def logDebug(msg: ⇒ String, throwable: Throwable): Unit
- def logDebug(msg: ⇒ String): Unit
- def logError(msg: ⇒ String, throwable: Throwable): Unit
- def logError(msg: ⇒ String): Unit
- def logInfo(msg: ⇒ String, throwable: Throwable): Unit
- def logInfo(msg: ⇒ String): Unit
- def logTrace(msg: ⇒ String, throwable: Throwable): Unit
- def logTrace(msg: ⇒ String): Unit
- def logWarning(msg: ⇒ String, throwable: Throwable): Unit
- def logWarning(msg: ⇒ String): Unit
Inherited from AnyRef
Value Members
- final def !=(arg0: AnyRef): Boolean
- final def ##(): Int
- final def ==(arg0: AnyRef): Boolean
- def clone(): AnyRef
- final def eq(arg0: AnyRef): Boolean
- def equals(arg0: Any): Boolean
- def finalize(): Unit
- final def getClass(): java.lang.Class[_]
- def hashCode(): Int
- final def ne(arg0: AnyRef): Boolean
- final def notify(): Unit
- final def notifyAll(): Unit
- final def synchronized[T0](arg0: ⇒ T0): T0
- def toString(): String
- final def wait(): Unit
- final def wait(arg0: Long, arg1: Int): Unit
- final def wait(arg0: Long): Unit
Inherited from Any
Value Members
- final def !=(arg0: Any): Boolean
- final def ==(arg0: Any): Boolean
- final def asInstanceOf[T0]: T0
- final def isInstanceOf[T0]: Boolean
Internal helper class that saves an RDD using a Hadoop OutputFormat. It is only public because we need to access this class from the spark package to use some package-private Hadoop functions, but it should not be used directly by users.

Saves the RDD using a JobConf, which should contain an output key class, an output value class, a filename to write to, etc., exactly like in a Hadoop MapReduce job.
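Since the writer is configured exactly like a Hadoop MapReduce job, a short sketch may help make the lifecycle concrete. This is only an illustration, not the intended usage (Spark's own RDD save methods drive this class internally): the output path, the Text key/value types, and the preSetup → setup → open → write → close → commit → commitJob call order are assumptions inferred from the member signatures listed above.

import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.{FileOutputFormat, JobConf, TextOutputFormat}

object HadoopWriterSketch {
  def main(args: Array[String]): Unit = {
    // Configure the JobConf as for an ordinary Hadoop MapReduce job: output key
    // and value classes, an OutputFormat, and a path to write to. The path and
    // the Text/Text key-value types here are purely illustrative.
    val conf = new JobConf()
    conf.setOutputKeyClass(classOf[Text])
    conf.setOutputValueClass(classOf[Text])
    conf.setOutputFormat(classOf[TextOutputFormat[Text, Text]])
    FileOutputFormat.setOutputPath(conf, new Path("/tmp/hadoop-writer-demo"))

    // The class lives in the spark package, per the description above.
    val writer = new spark.HadoopWriter(conf)

    // Assumed call order, inferred from the method names: one-time job setup,
    // then per-task setup/open/write/close/commit, then a final job commit.
    writer.preSetup()
    writer.setup(jobid = 0, splitid = 0, attemptid = 0) // identifies this task attempt
    writer.open()
    writer.write(new Text("key"), new Text("value"))    // must match the JobConf classes
    writer.close()
    writer.commit()
    writer.commitJob()
  }
}

In real use, the per-task calls (setup, open, write, close, commit) would run once per RDD partition on the executors, while preSetup and commitJob would run once for the whole job on the driver; that split is an assumption based on the method names, not something the listing above states.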