/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

/**
 * <p>
 * Simple {@link DNSToSwitchMapping} implementation that reads a two-column
 * text file. The columns are separated by whitespace. The first column is a
 * DNS or IP address and the second column specifies the rack to which the
 * address maps.
 * </p>
 * <p>
 * This class uses the configuration parameter {@code
 * net.topology.table.file.name} to locate the mapping file.
 * </p>
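 * <p>
 * For example, the mapping file location can be set programmatically before
 * the mapping is used (the file path below is purely illustrative):
 * </p>
 * <pre>
 *   Configuration conf = new Configuration();
 *   conf.set("net.topology.table.file.name", "/etc/hadoop/topology.table");
 *   TableMapping mapping = new TableMapping();
 *   mapping.setConf(conf);
 * </pre>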
 * <p>
 * Calls to {@link #resolve(List)} will look up the address as defined in the
 * mapping file. If no entry corresponding to the address is found, the value
 * {@code /default-rack} is returned.
 * </p>
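 * <p>
 * A mapping file might look like the following (the host names, addresses and
 * rack names are illustrative). Blank lines and lines starting with
 * {@code #} are ignored:
 * </p>
 * <pre>
 *   # host or IP address     rack
 *   host1.example.com        /rack1
 *   192.168.10.12            /rack2
 * </pre>
 * <p>
 * With that table, resolving {@code host1.example.com} yields
 * {@code /rack1}, while any name that is absent from the table resolves to
 * {@code /default-rack}.
 * </p>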
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TableMapping extends CachedDNSToSwitchMapping {

  private static final Log LOG = LogFactory.getLog(TableMapping.class);

  public TableMapping() {
    super(new RawTableMapping());
  }

  private RawTableMapping getRawMapping() {
    return (RawTableMapping) rawMapping;
  }

  @Override
  public Configuration getConf() {
    return getRawMapping().getConf();
  }

  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    getRawMapping().setConf(conf);
  }

  @Override
  public void reloadCachedMappings() {
    super.reloadCachedMappings();
    getRawMapping().reloadCachedMappings();
  }

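  /**
   * The uncached mapping: loads the host-to-rack table from the configured
   * file and resolves names against it.
   */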
  private static final class RawTableMapping extends Configured
      implements DNSToSwitchMapping {

    private Map<String, String> map;

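    /**
     * Loads the topology table from the file named by
     * {@code net.topology.table.file.name}.
     *
     * @return the host-to-rack map, or {@code null} if the file is not
     *         configured or cannot be read
     */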
    private Map<String, String> load() {
      Map<String, String> loadMap = new HashMap<String, String>();

      String filename = getConf().get(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, null);
      if (StringUtils.isBlank(filename)) {
        LOG.warn(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY + " not configured.");
        return null;
      }

      BufferedReader reader = null;
      try {
        reader = new BufferedReader(new FileReader(filename));
        String line = reader.readLine();
        while (line != null) {
          line = line.trim();
          // Skip blank lines and comment lines starting with '#'.
          if (line.length() != 0 && line.charAt(0) != '#') {
            String[] columns = line.split("\\s+");
            if (columns.length == 2) {
              loadMap.put(columns[0], columns[1]);
            } else {
              LOG.warn("Line does not have two columns. Ignoring. " + line);
            }
          }
          line = reader.readLine();
        }
      } catch (Exception e) {
        LOG.warn(filename + " cannot be read.", e);
        return null;
      } finally {
        if (reader != null) {
          try {
            reader.close();
          } catch (IOException e) {
            LOG.warn(filename + " cannot be read.", e);
            return null;
          }
        }
      }
      return loadMap;
    }

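    /**
     * Resolves each name against the lazily loaded table, falling back to
     * {@link NetworkTopology#DEFAULT_RACK} for names without an entry.
     */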
    @Override
    public synchronized List<String> resolve(List<String> names) {
      if (map == null) {
        map = load();
        if (map == null) {
          LOG.warn("Failed to read topology table. " +
            NetworkTopology.DEFAULT_RACK + " will be used for all nodes.");
          map = new HashMap<String, String>();
        }
      }
      List<String> results = new ArrayList<String>(names.size());
      for (String name : names) {
        String result = map.get(name);
        if (result != null) {
          results.add(result);
        } else {
          results.add(NetworkTopology.DEFAULT_RACK);
        }
      }
      return results;
    }

    @Override
    public void reloadCachedMappings() {
      Map<String, String> newMap = load();
      if (newMap == null) {
        LOG.error("Failed to reload the topology table.  The cached " +
            "mappings will not be cleared.");
      } else {
        synchronized (this) {
          map = newMap;
        }
      }
    }

    @Override
    public void reloadCachedMappings(List<String> names) {
      // TableMapping has to reload all mappings at once, so there is no way
      // to reload mappings for specific nodes only.
      reloadCachedMappings();
    }
  }
}