HDFS-8055. NullPointerException when topology script is missing. Contributed by Anu Engineer.

(cherry picked from commit fef596df03)
cnauroth 2015-04-14 10:19:30 -07:00
parent 9779b79ed8
commit c7ebecfff5
7 changed files with 172 additions and 23 deletions


@@ -176,6 +176,9 @@ Release 2.8.0 - UNRELEASED
HDFS-6666. Abort NameNode and DataNode startup if security is enabled but
block access token is not enabled. (Vijay Bhat via cnauroth)
HDFS-8055. NullPointerException when topology script is missing.
(Anu Engineer via cnauroth)
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES


@@ -372,9 +372,17 @@ public class DatanodeManager {
if (client == null) {
List<String> hosts = new ArrayList<String> (1);
hosts.add(targethost);
String rName = dnsToSwitchMapping.resolve(hosts).get(0);
if (rName != null)
client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
String rName = resolvedHosts.get(0);
if (rName != null) {
client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR +
targethost);
}
} else {
LOG.error("Node Resolution failed. Please make sure that rack " +
"awareness scripts are functional.");
}
}
Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ?
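
The guard added above matters because a DNSToSwitchMapping backed by a topology script can return null (or an empty list) when the script is missing or cannot run, and the old code dereferenced the result directly. A minimal, self-contained sketch of that failure mode (the class and method names here are illustrative, not part of the commit):

// Sketch only: why resolve() needs a null/empty check before get(0).
import java.util.Arrays;
import java.util.List;

public class ResolveNullSketch {
  // Stand-in for a script-based mapping whose topology script is missing.
  static List<String> resolve(List<String> names) {
    return null; // resolution failed
  }

  public static void main(String[] args) {
    List<String> hosts = Arrays.asList("datanode-1");
    List<String> resolvedHosts = resolve(hosts);
    // Old pattern: resolve(hosts).get(0) would throw NullPointerException here.
    if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
      System.out.println("rack = " + resolvedHosts.get(0));
    } else {
      System.err.println("Node resolution failed; check the topology script.");
    }
  }
}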


@@ -19,6 +19,10 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
@@ -31,6 +35,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -40,10 +45,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.util.Shell;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
@@ -224,26 +229,74 @@ public class TestDatanodeManager {
* with the storage ids and storage types.
*/
@Test
public void testSortLocatedBlocks() throws IOException {
public void testSortLocatedBlocks() throws IOException, URISyntaxException {
HelperFunction(null);
}
/**
* Execute a functional topology script and make sure that the helper
* function works correctly.
*
* @throws IOException
* @throws URISyntaxException
*/
@Test
public void testgoodScript() throws IOException, URISyntaxException {
HelperFunction("/" + Shell.appendScriptExtension("topology-script"));
}
/**
* Run a broken script and verify that the helper function is able to
* ignore the broken script and work correctly.
*
* @throws IOException
* @throws URISyntaxException
*/
@Test
public void testBadScript() throws IOException, URISyntaxException {
HelperFunction("/"+ Shell.appendScriptExtension("topology-broken-script"));
}
/**
* Helper function that tests the DatanodeManager's sortLocatedBlocks
* function; we invoke it with and without topology scripts.
*
* @param scriptFileName - Script Name or null
*
* @throws URISyntaxException
* @throws IOException
*/
public void HelperFunction(String scriptFileName)
throws URISyntaxException, IOException {
// create the DatanodeManager which will be tested
Configuration conf = new Configuration();
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
Mockito.when(fsn.hasWriteLock()).thenReturn(true);
if (scriptFileName != null && !scriptFileName.isEmpty()) {
URL shellScript = getClass().getResource(scriptFileName);
Path resourcePath = Paths.get(shellScript.toURI());
FileUtil.setExecutable(resourcePath.toFile(), true);
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
resourcePath.toString());
}
DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
fsn, new Configuration());
fsn, conf);
// register 5 datanodes, each with different storage ID and type
DatanodeInfo[] locs = new DatanodeInfo[5];
String[] storageIDs = new String[5];
StorageType[] storageTypes = new StorageType[]{
StorageType.ARCHIVE,
StorageType.DEFAULT,
StorageType.DISK,
StorageType.RAM_DISK,
StorageType.SSD
StorageType.ARCHIVE,
StorageType.DEFAULT,
StorageType.DISK,
StorageType.RAM_DISK,
StorageType.SSD
};
for(int i = 0; i < 5; i++) {
for (int i = 0; i < 5; i++) {
// register new datanode
String uuid = "UUID-"+i;
String uuid = "UUID-" + i;
String ip = "IP-" + i;
DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
@@ -255,7 +308,7 @@ public class TestDatanodeManager {
// get location and storage information
locs[i] = dm.getDatanode(uuid);
storageIDs[i] = "storageID-"+i;
storageIDs[i] = "storageID-" + i;
}
// set first 2 locations as decommissioned
@@ -280,18 +333,19 @@ public class TestDatanodeManager {
assertThat(sortedLocs.length, is(5));
assertThat(storageIDs.length, is(5));
assertThat(storageTypes.length, is(5));
for(int i = 0; i < sortedLocs.length; i++) {
assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
for (int i = 0; i < sortedLocs.length; i++) {
assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(),
is(storageIDs[i]));
assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageType(),
is(storageTypes[i]));
}
// Ensure the local node is first.
assertThat(sortedLocs[0].getIpAddr(), is(targetIp));
// Ensure the two decommissioned DNs were moved to the end.
assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
is(DatanodeInfo.AdminStates.DECOMMISSIONED));
assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
is(DatanodeInfo.AdminStates.DECOMMISSIONED));
assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(),
is(DatanodeInfo.AdminStates.DECOMMISSIONED));
assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(),
is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
}
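
The test builds the script name with Shell.appendScriptExtension so the same code exercises both platforms. Assuming the usual behavior of that Hadoop helper, it simply appends the platform-specific extension, which is why the commit adds both .sh and .cmd versions of each topology script below:

// Illustrative sketch, assuming Shell.appendScriptExtension appends ".cmd" on
// Windows and ".sh" elsewhere.
import org.apache.hadoop.util.Shell;

public class ScriptNameSketch {
  public static void main(String[] args) {
    // Prints "topology-script.cmd" on Windows, "topology-script.sh" otherwise.
    System.out.println(Shell.appendScriptExtension("topology-script"));
  }
}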


@@ -0,0 +1,22 @@
@echo off
@rem Licensed to the Apache Software Foundation (ASF) under one or more
@rem contributor license agreements. See the NOTICE file distributed with
@rem this work for additional information regarding copyright ownership.
@rem The ASF licenses this file to You under the Apache License, Version 2.0
@rem (the "License"); you may not use this file except in compliance with
@rem the License. You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@rem yes, this is a broken script, please don't fix this.
@rem This is used in a test case to verify that we can handle broken
@rem topology scripts.
exit 1


@@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## yes, this is a broken script, please don't fix this.
## This is used in a test case to verify that we can handle broken
## topology scripts.
exit 1
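
Both broken-script variants exit with status 1 on purpose; with a script-based mapping, a failing topology script typically leaves resolution with no result, which is exactly the null/empty path and the new error log exercised in the DatanodeManager change above.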


@@ -0,0 +1,18 @@
@echo off
@rem Licensed to the Apache Software Foundation (ASF) under one or more
@rem contributor license agreements. See the NOTICE file distributed with
@rem this work for additional information regarding copyright ownership.
@rem The ASF licenses this file to You under the Apache License, Version 2.0
@rem (the "License"); you may not use this file except in compliance with
@rem the License. You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
for /F "delims=- tokens=2" %%A in ("%1") do echo /rackID-%%A


@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo $1 | awk -F'-' '{printf("/rackID-%s",$2)}'
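
Given an argument such as IP-3, this script prints /rackID-3, and the .cmd variant above does the same via its for /F loop, so each datanode the test registers as IP-i resolves to its own rack /rackID-i.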