public static interface MRProtos.TaskAttemptReportProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder
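Both the generated MRProtos.TaskAttemptReportProto message and its nested Builder implement this interface, so read-only code can accept the OrBuilder type and work with either. The following is a minimal sketch, assuming the generated MRProtos class lives in org.apache.hadoop.mapreduce.v2.proto (as in the Hadoop MapReduce client sources); the class and helper names are illustrative, not part of Hadoop.

```java
import org.apache.hadoop.mapreduce.v2.proto.MRProtos;

// Sketch: read a task attempt report through its OrBuilder view.
public final class TaskAttemptReportSummary {

  private TaskAttemptReportSummary() {}

  /** Builds a short status line, guarding each optional field with has*(). */
  public static String summarize(MRProtos.TaskAttemptReportProtoOrBuilder report) {
    StringBuilder sb = new StringBuilder();
    if (report.hasTaskAttemptState()) {
      sb.append("state=").append(report.getTaskAttemptState());
    }
    if (report.hasProgress()) {
      sb.append(" progress=").append(report.getProgress());
    }
    if (report.hasDiagnosticInfo()) {
      sb.append(" diagnostics=").append(report.getDiagnosticInfo());
    }
    return sb.toString().trim();
  }
}
```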
Modifier and Type | Method and Description |
---|---|
org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto | getContainerId()<br>optional .hadoop.yarn.ContainerIdProto container_id = 15; |
org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProtoOrBuilder | getContainerIdOrBuilder()<br>optional .hadoop.yarn.ContainerIdProto container_id = 15; |
MRProtos.CountersProto | getCounters()<br>optional .hadoop.mapreduce.CountersProto counters = 6; |
MRProtos.CountersProtoOrBuilder | getCountersOrBuilder()<br>optional .hadoop.mapreduce.CountersProto counters = 6; |
String | getDiagnosticInfo()<br>optional string diagnostic_info = 7; |
com.google.protobuf.ByteString | getDiagnosticInfoBytes()<br>optional string diagnostic_info = 7; |
long | getFinishTime()<br>optional int64 finish_time = 5; |
String | getNodeManagerHost()<br>optional string node_manager_host = 12; |
com.google.protobuf.ByteString | getNodeManagerHostBytes()<br>optional string node_manager_host = 12; |
int | getNodeManagerHttpPort()<br>optional int32 node_manager_http_port = 14; |
int | getNodeManagerPort()<br>optional int32 node_manager_port = 13; |
MRProtos.PhaseProto | getPhase()<br>optional .hadoop.mapreduce.PhaseProto phase = 9; |
float | getProgress()<br>optional float progress = 3; |
long | getShuffleFinishTime()<br>optional int64 shuffle_finish_time = 10; |
long | getSortFinishTime()<br>optional int64 sort_finish_time = 11; |
long | getStartTime()<br>optional int64 start_time = 4; |
String | getStateString()<br>optional string state_string = 8; |
com.google.protobuf.ByteString | getStateStringBytes()<br>optional string state_string = 8; |
MRProtos.TaskAttemptIdProto | getTaskAttemptId()<br>optional .hadoop.mapreduce.TaskAttemptIdProto task_attempt_id = 1; |
MRProtos.TaskAttemptIdProtoOrBuilder | getTaskAttemptIdOrBuilder()<br>optional .hadoop.mapreduce.TaskAttemptIdProto task_attempt_id = 1; |
MRProtos.TaskAttemptStateProto | getTaskAttemptState()<br>optional .hadoop.mapreduce.TaskAttemptStateProto task_attempt_state = 2; |
boolean | hasContainerId()<br>optional .hadoop.yarn.ContainerIdProto container_id = 15; |
boolean | hasCounters()<br>optional .hadoop.mapreduce.CountersProto counters = 6; |
boolean | hasDiagnosticInfo()<br>optional string diagnostic_info = 7; |
boolean | hasFinishTime()<br>optional int64 finish_time = 5; |
boolean | hasNodeManagerHost()<br>optional string node_manager_host = 12; |
boolean | hasNodeManagerHttpPort()<br>optional int32 node_manager_http_port = 14; |
boolean | hasNodeManagerPort()<br>optional int32 node_manager_port = 13; |
boolean | hasPhase()<br>optional .hadoop.mapreduce.PhaseProto phase = 9; |
boolean | hasProgress()<br>optional float progress = 3; |
boolean | hasShuffleFinishTime()<br>optional int64 shuffle_finish_time = 10; |
boolean | hasSortFinishTime()<br>optional int64 sort_finish_time = 11; |
boolean | hasStartTime()<br>optional int64 start_time = 4; |
boolean | hasStateString()<br>optional string state_string = 8; |
boolean | hasTaskAttemptId()<br>optional .hadoop.mapreduce.TaskAttemptIdProto task_attempt_id = 1; |
boolean | hasTaskAttemptState()<br>optional .hadoop.mapreduce.TaskAttemptStateProto task_attempt_state = 2; |
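Every field of TaskAttemptReportProto is optional, so the get* accessors return the protobuf default (0, empty string, or the default enum/message instance) when a field is unset. Below is a minimal sketch of the has*/get* pattern for the timing fields; the elapsedMillis helper and its -1 sentinel are illustrative and assume an enclosing class with the MRProtos import shown earlier.

```java
// Sketch: guard optional int64 timing fields before using them, since an
// unset finish_time reads back as 0 rather than signalling "not finished".
static long elapsedMillis(MRProtos.TaskAttemptReportProtoOrBuilder report) {
  if (report.hasStartTime() && report.hasFinishTime()) {
    return report.getFinishTime() - report.getStartTime();
  }
  return -1L; // illustrative sentinel: timing not yet available
}
```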
boolean hasTaskAttemptId()
optional .hadoop.mapreduce.TaskAttemptIdProto task_attempt_id = 1;
MRProtos.TaskAttemptIdProto getTaskAttemptId()
optional .hadoop.mapreduce.TaskAttemptIdProto task_attempt_id = 1;
MRProtos.TaskAttemptIdProtoOrBuilder getTaskAttemptIdOrBuilder()
optional .hadoop.mapreduce.TaskAttemptIdProto task_attempt_id = 1;
boolean hasTaskAttemptState()
optional .hadoop.mapreduce.TaskAttemptStateProto task_attempt_state = 2;
MRProtos.TaskAttemptStateProto getTaskAttemptState()
optional .hadoop.mapreduce.TaskAttemptStateProto task_attempt_state = 2;
boolean hasProgress()
optional float progress = 3;
float getProgress()
optional float progress = 3;
boolean hasStartTime()
optional int64 start_time = 4;
long getStartTime()
optional int64 start_time = 4;
boolean hasFinishTime()
optional int64 finish_time = 5;
long getFinishTime()
optional int64 finish_time = 5;
boolean hasCounters()
optional .hadoop.mapreduce.CountersProto counters = 6;
MRProtos.CountersProto getCounters()
optional .hadoop.mapreduce.CountersProto counters = 6;
MRProtos.CountersProtoOrBuilder getCountersOrBuilder()
optional .hadoop.mapreduce.CountersProto counters = 6;
boolean hasDiagnosticInfo()
optional string diagnostic_info = 7;
String getDiagnosticInfo()
optional string diagnostic_info = 7;
com.google.protobuf.ByteString getDiagnosticInfoBytes()
optional string diagnostic_info = 7;
boolean hasStateString()
optional string state_string = 8;
String getStateString()
optional string state_string = 8;
com.google.protobuf.ByteString getStateStringBytes()
optional string state_string = 8;
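For the string fields (diagnostic_info, state_string, node_manager_host) the generated code exposes both a decoded String accessor and a *Bytes accessor returning the underlying UTF-8 com.google.protobuf.ByteString. A small sketch of when the bytes form can be preferable, for example when the value is only forwarded rather than displayed; the helper name is illustrative and assumes an enclosing class with the MRProtos import shown earlier.

```java
// Sketch: use the ByteString accessor when the value is only passed along,
// avoiding an extra UTF-8 decode/encode round trip.
static com.google.protobuf.ByteString rawDiagnostics(
    MRProtos.TaskAttemptReportProtoOrBuilder report) {
  return report.hasDiagnosticInfo()
      ? report.getDiagnosticInfoBytes()
      : com.google.protobuf.ByteString.EMPTY;
}
```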
boolean hasPhase()
optional .hadoop.mapreduce.PhaseProto phase = 9;
MRProtos.PhaseProto getPhase()
optional .hadoop.mapreduce.PhaseProto phase = 9;
boolean hasShuffleFinishTime()
optional int64 shuffle_finish_time = 10;
long getShuffleFinishTime()
optional int64 shuffle_finish_time = 10;
boolean hasSortFinishTime()
optional int64 sort_finish_time = 11;
long getSortFinishTime()
optional int64 sort_finish_time = 11;
boolean hasNodeManagerHost()
optional string node_manager_host = 12;
String getNodeManagerHost()
optional string node_manager_host = 12;
com.google.protobuf.ByteString getNodeManagerHostBytes()
optional string node_manager_host = 12;
boolean hasNodeManagerPort()
optional int32 node_manager_port = 13;
int getNodeManagerPort()
optional int32 node_manager_port = 13;
boolean hasNodeManagerHttpPort()
optional int32 node_manager_http_port = 14;
int getNodeManagerHttpPort()
optional int32 node_manager_http_port = 14;
boolean hasContainerId()
optional .hadoop.yarn.ContainerIdProto container_id = 15;
org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto getContainerId()
optional .hadoop.yarn.ContainerIdProto container_id = 15;
org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProtoOrBuilder getContainerIdOrBuilder()
optional .hadoop.yarn.ContainerIdProto container_id = 15;
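The setters that populate these fields live on MRProtos.TaskAttemptReportProto.Builder rather than on this interface; the Builder follows the standard protobuf-generated naming (setProgress, setStartTime, and so on) and itself implements TaskAttemptReportProtoOrBuilder. A minimal sketch of building a report and reading it back follows; the class name, host name, and port values are made up for illustration.

```java
import org.apache.hadoop.mapreduce.v2.proto.MRProtos;
import org.apache.hadoop.yarn.proto.YarnProtos;

// Sketch, not Hadoop code: build a report and read it back through the
// OrBuilder interface documented above.
public final class TaskAttemptReportExample {
  public static void main(String[] args) {
    MRProtos.TaskAttemptReportProto report =
        MRProtos.TaskAttemptReportProto.newBuilder()
            .setProgress(0.5f)
            .setStartTime(System.currentTimeMillis())
            .setNodeManagerHost("nm-host.example.com") // illustrative value
            .setNodeManagerPort(45454)                 // illustrative value
            .setNodeManagerHttpPort(8042)              // illustrative value
            .build();

    // Nested messages are exposed through their own *OrBuilder views; for an
    // unset field the accessor returns the default instance, not null.
    if (report.hasContainerId()) {
      YarnProtos.ContainerIdProtoOrBuilder containerId =
          report.getContainerIdOrBuilder();
      System.out.println(containerId);
    }
    System.out.println("progress=" + report.getProgress());
  }
}
```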