/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;

@InterfaceAudience.Private
public class HdfsConstants {
  // Long that indicates "leave current quota unchanged"
  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
  public static final long QUOTA_RESET = -1L;
  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
  /**
   * URI Scheme for hdfs://namenode/ URIs.
   */
  public static final String HDFS_URI_SCHEME = "hdfs";

  // Built-in block storage policy IDs and names. Higher IDs denote
  // "hotter" policies; the IDs are persisted, so never renumber them.
  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
  public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
  public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
  public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
  public static final byte HOT_STORAGE_POLICY_ID = 7;
  public static final String HOT_STORAGE_POLICY_NAME = "HOT";
  public static final byte WARM_STORAGE_POLICY_ID = 5;
  public static final String WARM_STORAGE_POLICY_NAME = "WARM";
  public static final byte COLD_STORAGE_POLICY_ID = 2;
  public static final String COLD_STORAGE_POLICY_NAME = "COLD";

  // TODO should be conf injected?
  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
  /**
   * A special path component contained in the path for a snapshot file/dir
   */
  public static final String DOT_SNAPSHOT_DIR = ".snapshot";
  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
      = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
  public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
      = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;

  /**
   * Generation stamp of blocks that pre-date the introduction
   * of a generation stamp.
   */
  public static final long GRANDFATHER_GENERATION_STAMP = 0;
  /**
   * The inode id validation of lease check will be skipped when the request
   * uses GRANDFATHER_INODE_ID for backward compatibility.
   */
  public static final long GRANDFATHER_INODE_ID = 0;
  public static final byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
  /**
   * A prefix put before the namenode URI inside the "service" field
   * of a delegation token, indicating that the URI is a logical (HA)
   * URI.
   */
  public static final String HA_DT_SERVICE_PREFIX = "ha-";
  // The name of the SafeModeException. FileSystem should retry if it sees
  // the below exception in RPC
  public static final String SAFEMODE_EXCEPTION_CLASS_NAME =
      "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
  /**
   * HDFS Protocol Names:
   */
  public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
      "org.apache.hadoop.hdfs.protocol.ClientProtocol";

  // Timeouts for communicating with DataNode for streaming writes/reads
  public static final int READ_TIMEOUT = 60 * 1000;
  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
  //for write pipeline
  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;

  /**
   * For a HDFS client to write to a file, a lease is granted; During the lease
   * period, no other client can write to the file. The writing client can
   * periodically renew the lease. When the file is closed, the lease is
   * revoked. The lease duration is bound by this soft limit and a
   * {@link HdfsConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
   * soft limit expires, the writer has sole write access to the file. If the
   * soft limit expires and the client fails to close the file or renew the
   * lease, another client can preempt the lease.
   */
  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
  /**
   * For a HDFS client to write to a file, a lease is granted; During the lease
   * period, no other client can write to the file. The writing client can
   * periodically renew the lease. When the file is closed, the lease is
   * revoked. The lease duration is bound by a
   * {@link HdfsConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
   * limit. If after the hard limit expires and the client has failed to renew
   * the lease, HDFS assumes that the client has quit and will automatically
   * close the file on behalf of the writer, and recover the lease.
   */
  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;

  // SafeMode actions
  public enum SafeModeAction {
    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET, SAFEMODE_FORCE_EXIT
  }

  // Actions for "hdfs dfsadmin -rollingUpgrade"; the empty string maps to
  // QUERY so a bare command (no argument) queries the upgrade status.
  public enum RollingUpgradeAction {
    QUERY, PREPARE, FINALIZE;

    private static final Map<String, RollingUpgradeAction> MAP
        = new HashMap<>();
    static {
      MAP.put("", QUERY);
      for(RollingUpgradeAction a : values()) {
        MAP.put(a.name(), a);
      }
    }

    /** Convert the given String to a RollingUpgradeAction. */
    public static RollingUpgradeAction fromString(String s) {
      return MAP.get(StringUtils.toUpperCase(s));
    }
  }

  // type of the datanode report
  public enum DatanodeReportType {
    ALL, LIVE, DEAD, DECOMMISSIONING
  }

  /* Hidden constructor */
  protected HdfsConstants() {
  }
}