DAGScheduler: Principles and Source Code Analysis
阿新 · Published: 2019-02-12
The stage division algorithm: you need a clear picture of how it works — how many jobs your Application is split into, how many stages each job is split into, and which code each stage covers. Only with that picture can you quickly diagnose errors reported in production or tune performance.
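To make this concrete, here is a minimal word-count driver (an illustrative sketch of my own, not taken from the post; the input path, app name, and local master are placeholders): each action produces one job, and the single shuffle introduced by reduceByKey splits that job into two stages.
import org.apache.spark.{SparkConf, SparkContext}
// Hypothetical example: one action => one job; the reduceByKey shuffle splits it into two stages.
object StageCountExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("StageCountExample").setMaster("local[2]"))
    val counts = sc.textFile("input.txt")   // placeholder path
      .flatMap(_.split(" "))                // narrow dependencies: stay in the same stage
      .map(word => (word, 1))
      .reduceByKey(_ + _)                   // ShuffleDependency => stage boundary
      .collect()                            // action => exactly one job is submitted to the DAGScheduler
    // Job 0 therefore runs as ShuffleMapStage 0 (map side) followed by ResultStage 1 (reduce side).
    counts.foreach(println)
    sc.stop()
  }
}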
// Core entry point for the DAGScheduler's job scheduling
private[scheduler] def handleJobSubmitted(jobId: Int,
finalRDD: RDD[_],
func: (TaskContext, Iterator[_]) => _,
partitions: Array[Int],
callSite: CallSite,
listener: JobListener,
properties: Properties) {
// Create the finalStage from the last RDD of the job (the RDD the action was called on)
var finalStage: ResultStage = null
try {
// New stage creation may throw an exception if, for example, jobs are run on a
// HadoopRDD whose underlying HDFS files have been deleted.
// Add the new stage to the DAGScheduler's internal caches
finalStage = newResultStage(finalRDD, func, partitions, jobId, callSite)
} catch {
case e: Exception =>
logWarning("Creating new stage failed due to exception - job: " + jobId, e)
listener.jobFailed(e)
return
}
// Create an ActiveJob from the finalStage (the job's last stage is the finalStage)
val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
clearCacheLocs()
logInfo("Got job %s (%s) with %d output partitions" .format(
job.jobId, callSite.shortForm, partitions.length))
logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
logInfo("Parents of final stage: " + finalStage.parents)
logInfo("Missing parents: " + getMissingParentStages(finalStage))
val jobSubmissionTime = clock.getTimeMillis()
// Add the job to the in-memory caches
jobIdToActiveJob(jobId) = job
activeJobs += job
finalStage.setActiveJob(job)
val stageIds = jobIdToStageIds(jobId).toArray
val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
listenerBus.post(
SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
// Submit the finalStage via submitStage()
// This call ends up actually submitting only the first stage; all the other stages are held back in waitingStages.
submitStage(finalStage)
// Submit the waiting stages
submitWaitingStages()
}
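For context, handleJobSubmitted is never called directly from user code; an RDD action starts the chain. A hedged sketch of that chain (method names as in the Spark 1.x/2.x code base; the app name and master below are placeholders):
import org.apache.spark.{SparkConf, SparkContext}
// Rough call chain on the driver:
//   rdd.count()  ->  SparkContext.runJob  ->  DAGScheduler.runJob  ->  DAGScheduler.submitJob
//   submitJob posts a JobSubmitted event; the event loop then invokes handleJobSubmitted
//   with the final RDD, the function to run on each partition, and the partition ids.
object JobSubmissionExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("JobSubmissionExample").setMaster("local[2]"))
    val rdd = sc.parallelize(1 to 100, 4)
    // One action == one job: this count() is what ultimately reaches handleJobSubmitted.
    println(rdd.count())
    sc.stop()
  }
}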
// Method that submits a stage
// This is effectively the entry point of the stage division algorithm,
// but the algorithm itself is made up of submitStage() and getMissingParentStages() working together.
private def submitStage(stage: Stage) {
val jobId = activeJobForStage(stage)
if (jobId.isDefined) {
logDebug("submitStage(" + stage + ")")
if (!waitingStages(stage) && !runningStages(stage) && !failedStages(stage)) {
// Call getMissingParentStages() to get this stage's missing parent stages
val missing = getMissingParentStages(stage).sortBy(_.id)
logDebug("missing: " + missing)
if (missing.isEmpty) {
logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents")
submitMissingTasks(stage, jobId.get)
} else {
// Recursively call submitStage() to submit the parent stages, until a stage with no missing parents is reached.
// At that point stage 0 is submitted, and all of the other stages sit in waitingStages.
// This recursion is the essence of the stage division algorithm.
for (parent <- missing) {
submitStage(parent)
}
// and put the current stage into the waitingStages set
waitingStages += stage
}
}
} else {
abortStage(stage, "No active job for stage " + stage.id, None)
}
}
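The parent-first recursion is easier to see in isolation. Below is a simplified, self-contained sketch of the same idea using toy types of my own (ToyStage, waiting) rather than the real DAGScheduler classes:
import scala.collection.mutable
// Toy model of submitStage's recursion: only a stage with no missing parents runs now;
// everything else is parked until its parents finish.
case class ToyStage(id: Int, parents: List[ToyStage])
object SubmitStageSketch {
  val waiting = mutable.Set[ToyStage]()
  def submit(stage: ToyStage): Unit = {
    val missing = stage.parents                            // stands in for getMissingParentStages()
    if (missing.isEmpty) {
      println(s"submitMissingTasks(stage ${stage.id})")    // the root stage is submitted immediately
    } else {
      missing.foreach(submit)                              // recurse: parents are submitted first
      waiting += stage                                     // the current stage waits for its parents
    }
  }
  def main(args: Array[String]): Unit = {
    val stage0 = ToyStage(0, Nil)
    val stage1 = ToyStage(1, List(stage0))
    val finalStage = ToyStage(2, List(stage1))
    submit(finalStage)   // prints only "submitMissingTasks(stage 0)"; stages 1 and 2 end up in waiting
  }
}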
// Get the missing parent stages of a given stage.
// If all of the last RDD's dependencies turn out to be narrow dependencies, no new stage is created.
// But wherever an RDD has a wide (shuffle) dependency on another RDD, a new stage is created
// and returned immediately.
private def getMissingParentStages(stage: Stage): List[Stage] = {
val missing = new HashSet[Stage]
val visited = new HashSet[RDD[_]]
// We are manually maintaining a stack here to prevent StackOverflowError
// Define a stack of RDDs still to be visited
val waitingForVisit = new Stack[RDD[_]]
def visit(rdd: RDD[_]) {
if (!visited(rdd)) {
visited += rdd
val rddHasUncachedPartitions = getCacheLocs(rdd).contains(Nil)
if (rddHasUncachedPartitions) {
for (dep <- rdd.dependencies) {
dep match {
// Wide (shuffle) dependency case.
// Every operator that performs a shuffle is actually backed by three RDDs (MapPartitionsRDD, ShuffledRDD, MapPartitionsRDD);
// the map side of the ShuffledRDD is split off into a new (parent) stage.
case shufDep: ShuffleDependency[_, _, _] =>
// Create a stage from the wide dependency's RDD, with isShuffleMap set to true.
// By default the last stage is not a ShuffleMapStage,
// but every stage before the finalStage is a ShuffleMapStage.
val mapStage = getShuffleMapStage(shufDep, stage.firstJobId)
if (!mapStage.isAvailable) {
missing += mapStage
}
// Narrow dependency: just push the parent RDD onto the stack
case narrowDep: NarrowDependency[_] =>
waitingForVisit.push(narrowDep.rdd)
}
}
}
}
// First, push the stage's last RDD onto the stack
waitingForVisit.push(stage.rdd)
while (waitingForVisit.nonEmpty) {
// Pop each RDD off the stack and call visit() on it, starting with the stage's last RDD
visit(waitingForVisit.pop())
}
missing.toList
}
// Submit the stage: create a batch of tasks for it, with as many tasks as there are partitions
private def submitMissingTasks(stage: Stage, jobId: Int) {
logDebug("submitMissingTasks(" + stage + ")")
// Work out which partitions still need to be computed
stage.pendingPartitions.clear()
val partitionsToCompute: Seq[Int] = stage.findMissingPartitions()
// Create internal accumulators if the stage has no accumulators initialized.
// Reset internal accumulators only if this stage is not partially submitted.
if (stage.internalAccumulators.isEmpty || stage.numPartitions == partitionsToCompute.size) {
stage.resetInternalAccumulators()
}
val properties = jobIdToActiveJob(jobId).properties
// Add the stage to the runningStages set
runningStages += stage
stage match {
case s: ShuffleMapStage =>
outputCommitCoordinator.stageStart(stage = s.id, maxPartitionId = s.numPartitions - 1)
case s: ResultStage =>
outputCommitCoordinator.stageStart(
stage = s.id, maxPartitionId = s.rdd.partitions.length - 1)
}
val taskIdToLocations: Map[Int, Seq[TaskLocation]] = try {
stage match {
case s: ShuffleMapStage =>
partitionsToCompute.map { id => (id, getPreferredLocs(stage.rdd, id))}.toMap
case s: ResultStage =>
val job = s.activeJob.get
partitionsToCompute.map { id =>
val p = s.partitions(id)
(id, getPreferredLocs(stage.rdd, p))
}.toMap
}
} catch {
case NonFatal(e) =>
stage.makeNewStageAttempt(partitionsToCompute.size)
listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
abortStage(stage, s"Task creation failed: $e\n${e.getStackTraceString}", Some(e))
runningStages -= stage
return
}
stage.makeNewStageAttempt(partitionsToCompute.size, taskIdToLocations.values.toSeq)
listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
var taskBinary: Broadcast[Array[Byte]] = null
try {
// For ShuffleMapTask, serialize and broadcast (rdd, shuffleDep); for ResultTask, serialize and broadcast (rdd, func).
val taskBinaryBytes: Array[Byte] = stage match {
case stage: ShuffleMapStage =>
closureSerializer.serialize((stage.rdd, stage.shuffleDep): AnyRef).array()
case stage: ResultStage =>
closureSerializer.serialize((stage.rdd, stage.func): AnyRef).array()
}
taskBinary = sc.broadcast(taskBinaryBytes)
} catch {
case e: NotSerializableException =>
abortStage(stage, "Task not serializable: " + e.toString, Some(e))
runningStages -= stage
return
case NonFatal(e) =>
abortStage(stage, s"Task serialization failed: $e\n${e.getStackTraceString}", Some(e))
runningStages -= stage
return
}
// Create the required number of tasks for the stage
val tasks: Seq[Task[_]] = try {
stage match {
// Every stage except the finalStage is a ShuffleMapStage.
case stage: ShuffleMapStage =>
partitionsToCompute.map { id =>
// Create one task per partition
// and give each task its best (preferred) locations
val locs = taskIdToLocations(id)
val part = stage.rdd.partitions(id)
// For a ShuffleMapStage, create ShuffleMapTasks
new ShuffleMapTask(stage.id, stage.latestInfo.attemptId,
taskBinary, part, locs, stage.internalAccumulators)
}
// If it is not a ShuffleMapStage it must be the finalStage, so create ResultTasks instead
case stage: ResultStage =>
val job = stage.activeJob.get
partitionsToCompute.map { id =>
// Create one task per partition.
// Each task's best location is found by walking back from the end of the stage: if some RDD's partition
// has been cached or checkpointed, the task's preferred location is wherever that partition is cached or checkpointed.
val p: Int = stage.partitions(id)
val part = stage.rdd.partitions(p)
val locs = taskIdToLocations(id)
new ResultTask(stage.id, stage.latestInfo.attemptId,
taskBinary, part, locs, id, stage.internalAccumulators)
}
}
} catch {
case NonFatal(e) =>
abortStage(stage, s"Task creation failed: $e\n${e.getStackTraceString}", Some(e))
runningStages -= stage
return
}
if (tasks.size > 0) {
logInfo("Submitting " + tasks.size + " missing tasks from " + stage + " (" + stage.rdd + ")")
stage.pendingPartitions ++= tasks.map(_.partitionId)
logDebug("New pending partitions: " + stage.pendingPartitions)
taskScheduler.submitTasks(new TaskSet(
tasks.toArray, stage.id, stage.latestInfo.attemptId, jobId, properties))
stage.latestInfo.submissionTime = Some(clock.getTimeMillis())
} else {
markStageAsFinished(stage, None)
val debugString = stage match {
case stage: ShuffleMapStage =>
s"Stage ${stage} is actually done; " +
s"(available: ${stage.isAvailable}," +
s"available outputs: ${stage.numAvailableOutputs}," +
s"partitions: ${stage.numPartitions})"
case stage : ResultStage =>
s"Stage ${stage} is actually done; (partitions: ${stage.numPartitions})"
}
logDebug(debugString)
}
}
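The one-task-per-partition rule and the cache-driven preferred locations described in the comments above can be observed from the driver side. A small sketch (my own example; names and the local master are placeholders):
import org.apache.spark.{SparkConf, SparkContext}
// A stage launches one task per partition it still has to compute, and cached partitions
// later feed the preferred locations computed per task (getPreferredLocs / taskIdToLocations).
object TaskCountExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("TaskCountExample").setMaster("local[4]"))
    val rdd = sc.parallelize(1 to 1000, numSlices = 8).map(_ * 2).cache()
    rdd.count()                      // first job: its ResultStage runs 8 tasks (one per partition) and caches the blocks
    println(rdd.getNumPartitions)    // 8 -> any stage that recomputes this RDD creates 8 tasks
    println(rdd.sum())               // later jobs get preferred locations from the cached blocks
    sc.stop()
  }
}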
Summary of the stage division algorithm (see the sketch after this list):
- Work backwards from the finalStage
- Split stages at wide (shuffle) dependencies
- Recursively submit parent stages first
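As a quick way to see this division on a real lineage, toDebugString prints the RDD chain backwards from the final RDD, and each ShuffledRDD in the output marks a stage boundary. A sketch (my own example; app name and master are placeholders):
import org.apache.spark.{SparkConf, SparkContext}
// toDebugString shows the same backwards division: indentation changes after each shuffle
// mark where the DAGScheduler cuts the job into ShuffleMapStages plus one final ResultStage.
object DebugStringExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("DebugStringExample").setMaster("local[2]"))
    val result = sc.parallelize(Seq("a b", "b c", "a c"), 2)
      .flatMap(_.split(" "))
      .map(w => (w, 1))
      .reduceByKey(_ + _)               // stage boundary 1
      .map { case (w, n) => (n, w) }
      .sortByKey()                      // stage boundary 2
    println(result.toDebugString)       // lineage printed from the final RDD backwards
    sc.stop()
  }
}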