001/** 002* Licensed to the Apache Software Foundation (ASF) under one 003* or more contributor license agreements. See the NOTICE file 004* distributed with this work for additional information 005* regarding copyright ownership. The ASF licenses this file 006* to you under the Apache License, Version 2.0 (the 007* "License"); you may not use this file except in compliance 008* with the License. You may obtain a copy of the License at 009* 010* http://www.apache.org/licenses/LICENSE-2.0 011* 012* Unless required by applicable law or agreed to in writing, software 013* distributed under the License is distributed on an "AS IS" BASIS, 014* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015* See the License for the specific language governing permissions and 016* limitations under the License. 017*/ 018 019package org.apache.hadoop.yarn.logaggregation; 020 021import java.io.DataInput; 022import java.io.DataInputStream; 023import java.io.DataOutput; 024import java.io.DataOutputStream; 025import java.io.EOFException; 026import java.io.File; 027import java.io.FileInputStream; 028import java.io.IOException; 029import java.io.InputStreamReader; 030import java.io.OutputStream; 031import java.io.PrintStream; 032import java.io.Writer; 033import java.nio.charset.Charset; 034import java.security.PrivilegedExceptionAction; 035import java.util.ArrayList; 036import java.util.Arrays; 037import java.util.Collections; 038import java.util.EnumSet; 039import java.util.HashMap; 040import java.util.HashSet; 041import java.util.Iterator; 042import java.util.List; 043import java.util.Map; 044import java.util.Map.Entry; 045import java.util.Set; 046import java.util.regex.Pattern; 047 048import org.apache.commons.io.input.BoundedInputStream; 049import org.apache.commons.io.output.WriterOutputStream; 050import org.apache.commons.logging.Log; 051import org.apache.commons.logging.LogFactory; 052import org.apache.hadoop.classification.InterfaceAudience.Private; 053import org.apache.hadoop.classification.InterfaceAudience.Public; 054import org.apache.hadoop.classification.InterfaceStability.Evolving; 055import org.apache.hadoop.conf.Configuration; 056import org.apache.hadoop.fs.CreateFlag; 057import org.apache.hadoop.fs.FSDataInputStream; 058import org.apache.hadoop.fs.FSDataOutputStream; 059import org.apache.hadoop.fs.FileContext; 060import org.apache.hadoop.fs.Options; 061import org.apache.hadoop.fs.Path; 062import org.apache.hadoop.fs.permission.FsPermission; 063import org.apache.hadoop.io.IOUtils; 064import org.apache.hadoop.io.SecureIOUtils; 065import org.apache.hadoop.io.Writable; 066import org.apache.hadoop.io.file.tfile.TFile; 067import org.apache.hadoop.security.UserGroupInformation; 068import org.apache.hadoop.yarn.api.records.ApplicationAccessType; 069import org.apache.hadoop.yarn.api.records.ContainerId; 070import org.apache.hadoop.yarn.api.records.LogAggregationContext; 071import org.apache.hadoop.yarn.conf.YarnConfiguration; 072import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; 073import org.apache.hadoop.yarn.util.ConverterUtils; 074import org.apache.hadoop.yarn.util.Times; 075 076import com.google.common.annotations.VisibleForTesting; 077import com.google.common.base.Predicate; 078import com.google.common.collect.Iterables; 079import com.google.common.collect.Sets; 080 081@Public 082@Evolving 083public class AggregatedLogFormat { 084 085 private static final Log LOG = LogFactory.getLog(AggregatedLogFormat.class); 086 private static final LogKey APPLICATION_ACL_KEY = new LogKey("APPLICATION_ACL"); 087 private static final LogKey APPLICATION_OWNER_KEY = new LogKey("APPLICATION_OWNER"); 088 private static final LogKey VERSION_KEY = new LogKey("VERSION"); 089 private static final Map<String, LogKey> RESERVED_KEYS; 090 //Maybe write out the retention policy. 091 //Maybe write out a list of containerLogs skipped by the retention policy. 092 private static final int VERSION = 1; 093 094 /** 095 * Umask for the log file. 096 */ 097 private static final FsPermission APP_LOG_FILE_UMASK = FsPermission 098 .createImmutable((short) (0640 ^ 0777)); 099 100 101 static { 102 RESERVED_KEYS = new HashMap<String, AggregatedLogFormat.LogKey>(); 103 RESERVED_KEYS.put(APPLICATION_ACL_KEY.toString(), APPLICATION_ACL_KEY); 104 RESERVED_KEYS.put(APPLICATION_OWNER_KEY.toString(), APPLICATION_OWNER_KEY); 105 RESERVED_KEYS.put(VERSION_KEY.toString(), VERSION_KEY); 106 } 107 108 @Public 109 public static class LogKey implements Writable { 110 111 private String keyString; 112 113 public LogKey() { 114 115 } 116 117 public LogKey(ContainerId containerId) { 118 this.keyString = containerId.toString(); 119 } 120 121 public LogKey(String keyString) { 122 this.keyString = keyString; 123 } 124 125 @Override 126 public int hashCode() { 127 return keyString == null ? 0 : keyString.hashCode(); 128 } 129 130 @Override 131 public boolean equals(Object obj) { 132 if (obj instanceof LogKey) { 133 LogKey other = (LogKey) obj; 134 if (this.keyString == null) { 135 return other.keyString == null; 136 } 137 return this.keyString.equals(other.keyString); 138 } 139 return false; 140 } 141 142 @Private 143 @Override 144 public void write(DataOutput out) throws IOException { 145 out.writeUTF(this.keyString); 146 } 147 148 @Private 149 @Override 150 public void readFields(DataInput in) throws IOException { 151 this.keyString = in.readUTF(); 152 } 153 154 @Override 155 public String toString() { 156 return this.keyString; 157 } 158 } 159 160 @Private 161 public static class LogValue { 162 163 private final List<String> rootLogDirs; 164 private final ContainerId containerId; 165 private final String user; 166 private final LogAggregationContext logAggregationContext; 167 private Set<File> uploadedFiles = new HashSet<File>(); 168 private final Set<String> alreadyUploadedLogFiles; 169 private Set<String> allExistingFileMeta = new HashSet<String>(); 170 private final boolean appFinished; 171 // TODO Maybe add a version string here. Instead of changing the version of 172 // the entire k-v format 173 174 public LogValue(List<String> rootLogDirs, ContainerId containerId, 175 String user) { 176 this(rootLogDirs, containerId, user, null, new HashSet<String>(), true); 177 } 178 179 public LogValue(List<String> rootLogDirs, ContainerId containerId, 180 String user, LogAggregationContext logAggregationContext, 181 Set<String> alreadyUploadedLogFiles, boolean appFinished) { 182 this.rootLogDirs = new ArrayList<String>(rootLogDirs); 183 this.containerId = containerId; 184 this.user = user; 185 186 // Ensure logs are processed in lexical order 187 Collections.sort(this.rootLogDirs); 188 this.logAggregationContext = logAggregationContext; 189 this.alreadyUploadedLogFiles = alreadyUploadedLogFiles; 190 this.appFinished = appFinished; 191 } 192 193 private Set<File> getPendingLogFilesToUploadForThisContainer() { 194 Set<File> pendingUploadFiles = new HashSet<File>(); 195 for (String rootLogDir : this.rootLogDirs) { 196 File appLogDir = 197 new File(rootLogDir, 198 ConverterUtils.toString( 199 this.containerId.getApplicationAttemptId(). 200 getApplicationId()) 201 ); 202 File containerLogDir = 203 new File(appLogDir, ConverterUtils.toString(this.containerId)); 204 205 if (!containerLogDir.isDirectory()) { 206 continue; // ContainerDir may have been deleted by the user. 207 } 208 209 pendingUploadFiles 210 .addAll(getPendingLogFilesToUpload(containerLogDir)); 211 } 212 return pendingUploadFiles; 213 } 214 215 public void write(DataOutputStream out, Set<File> pendingUploadFiles) 216 throws IOException { 217 List<File> fileList = new ArrayList<File>(pendingUploadFiles); 218 Collections.sort(fileList); 219 220 for (File logFile : fileList) { 221 // We only aggregate top level files. 222 // Ignore anything inside sub-folders. 223 if (logFile.isDirectory()) { 224 LOG.warn(logFile.getAbsolutePath() + " is a directory. Ignore it."); 225 continue; 226 } 227 228 FileInputStream in = null; 229 try { 230 in = secureOpenFile(logFile); 231 } catch (IOException e) { 232 logErrorMessage(logFile, e); 233 IOUtils.cleanup(LOG, in); 234 continue; 235 } 236 237 final long fileLength = logFile.length(); 238 // Write the logFile Type 239 out.writeUTF(logFile.getName()); 240 241 // Write the log length as UTF so that it is printable 242 out.writeUTF(String.valueOf(fileLength)); 243 244 // Write the log itself 245 try { 246 byte[] buf = new byte[65535]; 247 int len = 0; 248 long bytesLeft = fileLength; 249 while ((len = in.read(buf)) != -1) { 250 //If buffer contents within fileLength, write 251 if (len < bytesLeft) { 252 out.write(buf, 0, len); 253 bytesLeft-=len; 254 } 255 //else only write contents within fileLength, then exit early 256 else { 257 out.write(buf, 0, (int)bytesLeft); 258 break; 259 } 260 } 261 long newLength = logFile.length(); 262 if(fileLength < newLength) { 263 LOG.warn("Aggregated logs truncated by approximately "+ 264 (newLength-fileLength) +" bytes."); 265 } 266 this.uploadedFiles.add(logFile); 267 } catch (IOException e) { 268 String message = logErrorMessage(logFile, e); 269 out.write(message.getBytes(Charset.forName("UTF-8"))); 270 } finally { 271 IOUtils.cleanup(LOG, in); 272 } 273 } 274 } 275 276 @VisibleForTesting 277 public FileInputStream secureOpenFile(File logFile) throws IOException { 278 return SecureIOUtils.openForRead(logFile, getUser(), null); 279 } 280 281 private static String logErrorMessage(File logFile, Exception e) { 282 String message = "Error aggregating log file. Log file : " 283 + logFile.getAbsolutePath() + ". " + e.getMessage(); 284 LOG.error(message, e); 285 return message; 286 } 287 288 // Added for testing purpose. 289 public String getUser() { 290 return user; 291 } 292 293 private Set<File> getPendingLogFilesToUpload(File containerLogDir) { 294 Set<File> candidates = 295 new HashSet<File>(Arrays.asList(containerLogDir.listFiles())); 296 for (File logFile : candidates) { 297 this.allExistingFileMeta.add(getLogFileMetaData(logFile)); 298 } 299 300 if (this.logAggregationContext != null && candidates.size() > 0) { 301 filterFiles( 302 this.appFinished ? this.logAggregationContext.getIncludePattern() 303 : this.logAggregationContext.getRolledLogsIncludePattern(), 304 candidates, false); 305 306 filterFiles( 307 this.appFinished ? this.logAggregationContext.getExcludePattern() 308 : this.logAggregationContext.getRolledLogsExcludePattern(), 309 candidates, true); 310 311 Iterable<File> mask = 312 Iterables.filter(candidates, new Predicate<File>() { 313 @Override 314 public boolean apply(File next) { 315 return !alreadyUploadedLogFiles 316 .contains(getLogFileMetaData(next)); 317 } 318 }); 319 candidates = Sets.newHashSet(mask); 320 } 321 return candidates; 322 } 323 324 private void filterFiles(String pattern, Set<File> candidates, 325 boolean exclusion) { 326 if (pattern != null && !pattern.isEmpty()) { 327 Pattern filterPattern = Pattern.compile(pattern); 328 for (Iterator<File> candidatesItr = candidates.iterator(); candidatesItr 329 .hasNext();) { 330 File candidate = candidatesItr.next(); 331 boolean match = filterPattern.matcher(candidate.getName()).find(); 332 if ((!match && !exclusion) || (match && exclusion)) { 333 candidatesItr.remove(); 334 } 335 } 336 } 337 } 338 339 public Set<Path> getCurrentUpLoadedFilesPath() { 340 Set<Path> path = new HashSet<Path>(); 341 for (File file : this.uploadedFiles) { 342 path.add(new Path(file.getAbsolutePath())); 343 } 344 return path; 345 } 346 347 public Set<String> getCurrentUpLoadedFileMeta() { 348 Set<String> info = new HashSet<String>(); 349 for (File file : this.uploadedFiles) { 350 info.add(getLogFileMetaData(file)); 351 } 352 return info; 353 } 354 355 public Set<String> getAllExistingFilesMeta() { 356 return this.allExistingFileMeta; 357 } 358 359 private String getLogFileMetaData(File file) { 360 return containerId.toString() + "_" + file.getName() + "_" 361 + file.lastModified(); 362 } 363 } 364 365 /** 366 * The writer that writes out the aggregated logs. 367 */ 368 @Private 369 public static class LogWriter { 370 371 private final FSDataOutputStream fsDataOStream; 372 private final TFile.Writer writer; 373 private FileContext fc; 374 375 public LogWriter(final Configuration conf, final Path remoteAppLogFile, 376 UserGroupInformation userUgi) throws IOException { 377 try { 378 this.fsDataOStream = 379 userUgi.doAs(new PrivilegedExceptionAction<FSDataOutputStream>() { 380 @Override 381 public FSDataOutputStream run() throws Exception { 382 fc = FileContext.getFileContext(conf); 383 fc.setUMask(APP_LOG_FILE_UMASK); 384 return fc.create( 385 remoteAppLogFile, 386 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 387 new Options.CreateOpts[] {}); 388 } 389 }); 390 } catch (InterruptedException e) { 391 throw new IOException(e); 392 } 393 394 // Keys are not sorted: null arg 395 // 256KB minBlockSize : Expected log size for each container too 396 this.writer = 397 new TFile.Writer(this.fsDataOStream, 256 * 1024, conf.get( 398 YarnConfiguration.NM_LOG_AGG_COMPRESSION_TYPE, 399 YarnConfiguration.DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE), null, conf); 400 //Write the version string 401 writeVersion(); 402 } 403 404 @VisibleForTesting 405 public TFile.Writer getWriter() { 406 return this.writer; 407 } 408 409 private void writeVersion() throws IOException { 410 DataOutputStream out = this.writer.prepareAppendKey(-1); 411 VERSION_KEY.write(out); 412 out.close(); 413 out = this.writer.prepareAppendValue(-1); 414 out.writeInt(VERSION); 415 out.close(); 416 } 417 418 public void writeApplicationOwner(String user) throws IOException { 419 DataOutputStream out = this.writer.prepareAppendKey(-1); 420 APPLICATION_OWNER_KEY.write(out); 421 out.close(); 422 out = this.writer.prepareAppendValue(-1); 423 out.writeUTF(user); 424 out.close(); 425 } 426 427 public void writeApplicationACLs(Map<ApplicationAccessType, String> appAcls) 428 throws IOException { 429 DataOutputStream out = this.writer.prepareAppendKey(-1); 430 APPLICATION_ACL_KEY.write(out); 431 out.close(); 432 out = this.writer.prepareAppendValue(-1); 433 for (Entry<ApplicationAccessType, String> entry : appAcls.entrySet()) { 434 out.writeUTF(entry.getKey().toString()); 435 out.writeUTF(entry.getValue()); 436 } 437 out.close(); 438 } 439 440 public void append(LogKey logKey, LogValue logValue) throws IOException { 441 Set<File> pendingUploadFiles = 442 logValue.getPendingLogFilesToUploadForThisContainer(); 443 if (pendingUploadFiles.size() == 0) { 444 return; 445 } 446 DataOutputStream out = this.writer.prepareAppendKey(-1); 447 logKey.write(out); 448 out.close(); 449 out = this.writer.prepareAppendValue(-1); 450 logValue.write(out, pendingUploadFiles); 451 out.close(); 452 } 453 454 public void close() { 455 try { 456 this.writer.close(); 457 } catch (IOException e) { 458 LOG.warn("Exception closing writer", e); 459 } 460 IOUtils.closeStream(fsDataOStream); 461 } 462 } 463 464 @Public 465 @Evolving 466 public static class LogReader { 467 468 private final FSDataInputStream fsDataIStream; 469 private final TFile.Reader.Scanner scanner; 470 private final TFile.Reader reader; 471 472 public LogReader(Configuration conf, Path remoteAppLogFile) 473 throws IOException { 474 FileContext fileContext = FileContext.getFileContext(conf); 475 this.fsDataIStream = fileContext.open(remoteAppLogFile); 476 reader = 477 new TFile.Reader(this.fsDataIStream, fileContext.getFileStatus( 478 remoteAppLogFile).getLen(), conf); 479 this.scanner = reader.createScanner(); 480 } 481 482 private boolean atBeginning = true; 483 484 /** 485 * Returns the owner of the application. 486 * 487 * @return the application owner. 488 * @throws IOException 489 */ 490 public String getApplicationOwner() throws IOException { 491 TFile.Reader.Scanner ownerScanner = reader.createScanner(); 492 LogKey key = new LogKey(); 493 while (!ownerScanner.atEnd()) { 494 TFile.Reader.Scanner.Entry entry = ownerScanner.entry(); 495 key.readFields(entry.getKeyStream()); 496 if (key.toString().equals(APPLICATION_OWNER_KEY.toString())) { 497 DataInputStream valueStream = entry.getValueStream(); 498 return valueStream.readUTF(); 499 } 500 ownerScanner.advance(); 501 } 502 return null; 503 } 504 505 /** 506 * Returns ACLs for the application. An empty map is returned if no ACLs are 507 * found. 508 * 509 * @return a map of the Application ACLs. 510 * @throws IOException 511 */ 512 public Map<ApplicationAccessType, String> getApplicationAcls() 513 throws IOException { 514 // TODO Seek directly to the key once a comparator is specified. 515 TFile.Reader.Scanner aclScanner = reader.createScanner(); 516 LogKey key = new LogKey(); 517 Map<ApplicationAccessType, String> acls = 518 new HashMap<ApplicationAccessType, String>(); 519 while (!aclScanner.atEnd()) { 520 TFile.Reader.Scanner.Entry entry = aclScanner.entry(); 521 key.readFields(entry.getKeyStream()); 522 if (key.toString().equals(APPLICATION_ACL_KEY.toString())) { 523 DataInputStream valueStream = entry.getValueStream(); 524 while (true) { 525 String appAccessOp = null; 526 String aclString = null; 527 try { 528 appAccessOp = valueStream.readUTF(); 529 } catch (EOFException e) { 530 // Valid end of stream. 531 break; 532 } 533 try { 534 aclString = valueStream.readUTF(); 535 } catch (EOFException e) { 536 throw new YarnRuntimeException("Error reading ACLs", e); 537 } 538 acls.put(ApplicationAccessType.valueOf(appAccessOp), aclString); 539 } 540 541 } 542 aclScanner.advance(); 543 } 544 return acls; 545 } 546 547 /** 548 * Read the next key and return the value-stream. 549 * 550 * @param key 551 * @return the valueStream if there are more keys or null otherwise. 552 * @throws IOException 553 */ 554 public DataInputStream next(LogKey key) throws IOException { 555 if (!this.atBeginning) { 556 this.scanner.advance(); 557 } else { 558 this.atBeginning = false; 559 } 560 if (this.scanner.atEnd()) { 561 return null; 562 } 563 TFile.Reader.Scanner.Entry entry = this.scanner.entry(); 564 key.readFields(entry.getKeyStream()); 565 // Skip META keys 566 if (RESERVED_KEYS.containsKey(key.toString())) { 567 return next(key); 568 } 569 DataInputStream valueStream = entry.getValueStream(); 570 return valueStream; 571 } 572 573 /** 574 * Get a ContainerLogsReader to read the logs for 575 * the specified container. 576 * 577 * @param containerId 578 * @return object to read the container's logs or null if the 579 * logs could not be found 580 * @throws IOException 581 */ 582 @Private 583 public ContainerLogsReader getContainerLogsReader( 584 ContainerId containerId) throws IOException { 585 ContainerLogsReader logReader = null; 586 587 final LogKey containerKey = new LogKey(containerId); 588 LogKey key = new LogKey(); 589 DataInputStream valueStream = next(key); 590 while (valueStream != null && !key.equals(containerKey)) { 591 valueStream = next(key); 592 } 593 594 if (valueStream != null) { 595 logReader = new ContainerLogsReader(valueStream); 596 } 597 598 return logReader; 599 } 600 601 //TODO Change Log format and interfaces to be containerId specific. 602 // Avoid returning completeValueStreams. 603// public List<String> getTypesForContainer(DataInputStream valueStream){} 604// 605// /** 606// * @param valueStream 607// * The Log stream for the container. 608// * @param fileType 609// * the log type required. 610// * @return An InputStreamReader for the required log type or null if the 611// * type is not found. 612// * @throws IOException 613// */ 614// public InputStreamReader getLogStreamForType(DataInputStream valueStream, 615// String fileType) throws IOException { 616// valueStream.reset(); 617// try { 618// while (true) { 619// String ft = valueStream.readUTF(); 620// String fileLengthStr = valueStream.readUTF(); 621// long fileLength = Long.parseLong(fileLengthStr); 622// if (ft.equals(fileType)) { 623// BoundedInputStream bis = 624// new BoundedInputStream(valueStream, fileLength); 625// return new InputStreamReader(bis); 626// } else { 627// long totalSkipped = 0; 628// long currSkipped = 0; 629// while (currSkipped != -1 && totalSkipped < fileLength) { 630// currSkipped = valueStream.skip(fileLength - totalSkipped); 631// totalSkipped += currSkipped; 632// } 633// // TODO Verify skip behaviour. 634// if (currSkipped == -1) { 635// return null; 636// } 637// } 638// } 639// } catch (EOFException e) { 640// return null; 641// } 642// } 643 644 /** 645 * Writes all logs for a single container to the provided writer. 646 * @param valueStream 647 * @param writer 648 * @param logUploadedTime 649 * @throws IOException 650 */ 651 public static void readAcontainerLogs(DataInputStream valueStream, 652 Writer writer, long logUploadedTime) throws IOException { 653 OutputStream os = null; 654 PrintStream ps = null; 655 try { 656 os = new WriterOutputStream(writer, Charset.forName("UTF-8")); 657 ps = new PrintStream(os); 658 while (true) { 659 try { 660 readContainerLogs(valueStream, ps, logUploadedTime); 661 } catch (EOFException e) { 662 // EndOfFile 663 return; 664 } 665 } 666 } finally { 667 IOUtils.cleanup(LOG, ps); 668 IOUtils.cleanup(LOG, os); 669 } 670 } 671 672 /** 673 * Writes all logs for a single container to the provided writer. 674 * @param valueStream 675 * @param writer 676 * @throws IOException 677 */ 678 public static void readAcontainerLogs(DataInputStream valueStream, 679 Writer writer) throws IOException { 680 readAcontainerLogs(valueStream, writer, -1); 681 } 682 683 private static void readContainerLogs(DataInputStream valueStream, 684 PrintStream out, long logUploadedTime) throws IOException { 685 byte[] buf = new byte[65535]; 686 687 String fileType = valueStream.readUTF(); 688 String fileLengthStr = valueStream.readUTF(); 689 long fileLength = Long.parseLong(fileLengthStr); 690 out.print("LogType:"); 691 out.println(fileType); 692 if (logUploadedTime != -1) { 693 out.print("Log Upload Time:"); 694 out.println(Times.format(logUploadedTime)); 695 } 696 out.print("LogLength:"); 697 out.println(fileLengthStr); 698 out.println("Log Contents:"); 699 700 long curRead = 0; 701 long pendingRead = fileLength - curRead; 702 int toRead = 703 pendingRead > buf.length ? buf.length : (int) pendingRead; 704 int len = valueStream.read(buf, 0, toRead); 705 while (len != -1 && curRead < fileLength) { 706 out.write(buf, 0, len); 707 curRead += len; 708 709 pendingRead = fileLength - curRead; 710 toRead = 711 pendingRead > buf.length ? buf.length : (int) pendingRead; 712 len = valueStream.read(buf, 0, toRead); 713 } 714 out.println("End of LogType:" + fileType); 715 out.println(""); 716 } 717 718 /** 719 * Keep calling this till you get a {@link EOFException} for getting logs of 720 * all types for a single container. 721 * 722 * @param valueStream 723 * @param out 724 * @param logUploadedTime 725 * @throws IOException 726 */ 727 public static void readAContainerLogsForALogType( 728 DataInputStream valueStream, PrintStream out, long logUploadedTime) 729 throws IOException { 730 readContainerLogs(valueStream, out, logUploadedTime); 731 } 732 733 /** 734 * Keep calling this till you get a {@link EOFException} for getting logs of 735 * all types for a single container. 736 * 737 * @param valueStream 738 * @param out 739 * @throws IOException 740 */ 741 public static void readAContainerLogsForALogType( 742 DataInputStream valueStream, PrintStream out) 743 throws IOException { 744 readAContainerLogsForALogType(valueStream, out, -1); 745 } 746 747 public void close() { 748 IOUtils.cleanup(LOG, scanner, reader, fsDataIStream); 749 } 750 } 751 752 @Private 753 public static class ContainerLogsReader { 754 private DataInputStream valueStream; 755 private String currentLogType = null; 756 private long currentLogLength = 0; 757 private BoundedInputStream currentLogData = null; 758 private InputStreamReader currentLogISR; 759 760 public ContainerLogsReader(DataInputStream stream) { 761 valueStream = stream; 762 } 763 764 public String nextLog() throws IOException { 765 if (currentLogData != null && currentLogLength > 0) { 766 // seek to the end of the current log, relying on BoundedInputStream 767 // to prevent seeking past the end of the current log 768 do { 769 if (currentLogData.skip(currentLogLength) < 0) { 770 break; 771 } 772 } while (currentLogData.read() != -1); 773 } 774 775 currentLogType = null; 776 currentLogLength = 0; 777 currentLogData = null; 778 currentLogISR = null; 779 780 try { 781 String logType = valueStream.readUTF(); 782 String logLengthStr = valueStream.readUTF(); 783 currentLogLength = Long.parseLong(logLengthStr); 784 currentLogData = 785 new BoundedInputStream(valueStream, currentLogLength); 786 currentLogData.setPropagateClose(false); 787 currentLogISR = new InputStreamReader(currentLogData, 788 Charset.forName("UTF-8")); 789 currentLogType = logType; 790 } catch (EOFException e) { 791 } 792 793 return currentLogType; 794 } 795 796 public String getCurrentLogType() { 797 return currentLogType; 798 } 799 800 public long getCurrentLogLength() { 801 return currentLogLength; 802 } 803 804 public long skip(long n) throws IOException { 805 return currentLogData.skip(n); 806 } 807 808 public int read() throws IOException { 809 return currentLogData.read(); 810 } 811 812 public int read(byte[] buf, int off, int len) throws IOException { 813 return currentLogData.read(buf, off, len); 814 } 815 816 public int read(char[] buf, int off, int len) throws IOException { 817 return currentLogISR.read(buf, off, len); 818 } 819 } 820}