public interface TaskScheduler

All Known Implementing Classes: TaskSchedulerImpl
This interface allows plugging in different task schedulers. Each TaskScheduler schedules tasks
for a single SparkContext. These schedulers get sets of tasks submitted to them from the
DAGScheduler for each stage, and are responsible for sending the tasks to the cluster, running
them, retrying if there are failures, and mitigating stragglers. They return events to the
DAGScheduler.

Method Summary

| Modifier and Type | Method and Description |
|---|---|
| `String` | `appId()` |
| `scala.Option<String>` | `applicationAttemptId()` Get an application's attempt ID associated with the job. |
| `String` | `applicationId()` Get an application ID associated with the job. |
| `void` | `cancelTasks(int stageId, boolean interruptThread)` |
| `int` | `defaultParallelism()` |
| `boolean` | `executorHeartbeatReceived(String execId, scala.Tuple2<Object,scala.collection.Seq<AccumulatorV2<?,?>>>[] accumUpdates, BlockManagerId blockManagerId)` Update metrics for in-progress tasks and let the master know that the BlockManager is still alive. |
| `void` | `executorLost(String executorId, org.apache.spark.scheduler.ExecutorLossReason reason)` Process a lost executor. |
| `void` | `killAllTaskAttempts(int stageId, boolean interruptThread, String reason)` |
| `boolean` | `killTaskAttempt(long taskId, boolean interruptThread, String reason)` Kills a task attempt. |
| `void` | `postStartHook()` |
| `org.apache.spark.scheduler.Pool` | `rootPool()` |
| `scala.Enumeration.Value` | `schedulingMode()` |
| `void` | `setDAGScheduler(org.apache.spark.scheduler.DAGScheduler dagScheduler)` |
| `void` | `start()` |
| `void` | `stop()` |
| `void` | `submitTasks(org.apache.spark.scheduler.TaskSet taskSet)` |
| `void` | `workerRemoved(String workerId, String host, String message)` Process a removed worker. |
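Seen in miniature, the contract described above (start up, accept task sets, retry failures, shut down) looks like the sketch below. This is illustrative only: the real `TaskScheduler` trait is internal to Spark, so the snippet mirrors its lifecycle with a simplified, self-contained trait; the names `MiniTaskScheduler` and `LocalMiniScheduler` are made up here and are not part of the Spark API.

```scala
import java.util.concurrent.Executors
import scala.util.Try

// Simplified stand-in for the real interface (not Spark's trait).
trait MiniTaskScheduler {
  def start(): Unit
  def submitTasks(taskSet: Seq[Runnable]): Unit // the real method takes a TaskSet
  def stop(): Unit
}

// Toy scheduler: runs tasks on a fixed pool and retries each failed task once,
// echoing the "retrying if there are failures" duty described above.
class LocalMiniScheduler(threads: Int) extends MiniTaskScheduler {
  private val pool = Executors.newFixedThreadPool(threads)

  override def start(): Unit = () // a local pool needs no startup handshake

  override def submitTasks(taskSet: Seq[Runnable]): Unit =
    taskSet.foreach { task =>
      pool.execute { () =>
        if (Try(task.run()).isFailure) task.run() // one retry, then propagate
      }
    }

  override def stop(): Unit = pool.shutdown()
}
```
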
Method Detail

String appId()

scala.Option<String> applicationAttemptId()
Get an application's attempt ID associated with the job.

String applicationId()
Get an application ID associated with the job.

void cancelTasks(int stageId, boolean interruptThread)

int defaultParallelism()
boolean executorHeartbeatReceived(String execId, scala.Tuple2<Object,scala.collection.Seq<AccumulatorV2<?,?>>>[] accumUpdates, BlockManagerId blockManagerId)
Update metrics for in-progress tasks and let the master know that the BlockManager is still alive.
Parameters: execId, accumUpdates, blockManagerId (undocumented)
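The nested `accumUpdates` type reads more naturally in Scala. A minimal sketch of building one entry follows; the task ID `7L` is a made-up placeholder, `LongAccumulator` is one of Spark's concrete `AccumulatorV2` implementations, and the `Object` in the Java signature above is the erased Scala `Long` task ID.

```scala
import org.apache.spark.util.{AccumulatorV2, LongAccumulator}

// One array entry per in-flight task: (taskId, accumulator snapshots).
val recordsRead = new LongAccumulator()
recordsRead.add(1024L)

val accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])] =
  Array((7L, Seq(recordsRead))) // 7L is a hypothetical task ID
```
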
void executorLost(String executorId, org.apache.spark.scheduler.ExecutorLossReason reason)
Process a lost executor.
Parameters: executorId, reason (undocumented)
void killAllTaskAttempts(int stageId, boolean interruptThread, String reason)
boolean killTaskAttempt(long taskId, boolean interruptThread, String reason)
Kills a task attempt.
Parameters: taskId, interruptThread, reason (undocumented)
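From user code, this method is reachable through `SparkContext.killTaskAttempt`, which delegates to the scheduler. A hedged sketch, assuming a local-mode context; the listener logic and the job below are illustrative, not a prescribed pattern.

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskStart}

object KillAttemptDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setMaster("local[2]").setAppName("kill-attempt-demo"))

    // Kill the first task attempt we observe; the scheduler retries it.
    sc.addSparkListener(new SparkListener {
      @volatile private var killed = false
      override def onTaskStart(start: SparkListenerTaskStart): Unit =
        if (!killed) {
          killed = true
          sc.killTaskAttempt(start.taskInfo.taskId, interruptThread = true,
            reason = "demo: killing the first observed attempt")
        }
    })

    // The killed attempt is retried, so the job still completes.
    sc.parallelize(1 to 100, 4).map { i => Thread.sleep(10); i }.count()
    sc.stop()
  }
}
```
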
void postStartHook()
org.apache.spark.scheduler.Pool rootPool()
scala.Enumeration.Value schedulingMode()
void setDAGScheduler(org.apache.spark.scheduler.DAGScheduler dagScheduler)
void start()
void stop()
void submitTasks(org.apache.spark.scheduler.TaskSet taskSet)
void workerRemoved(String workerId, String host, String message)
Process a removed worker.
Parameters: workerId, host, message (undocumented)