HDFS-8211. DataNode UUID is always null in the JMX counter. (Contributed by Anu Engineer)

This commit is contained in:
Arpit Agarwal 2015-04-24 16:47:48 -07:00
parent 3884948d6c
commit 932cff610a
3 changed files with 70 additions and 2 deletions

View File

@ -239,6 +239,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8191. Fix byte to integer casting in SimulatedFSDataset#simulatedByte.
(Zhe Zhang via wang)
HDFS-8211. DataNode UUID is always null in the JMX counter. (Anu Engineer
via Arpit Agarwal)
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -1233,7 +1233,7 @@ public static String generateUuid() {
*
* @throws IOException
*/
private synchronized void checkDatanodeUuid() throws IOException {
synchronized void checkDatanodeUuid() throws IOException {
if (storage.getDatanodeUuid() == null) {
storage.setDatanodeUuid(generateUuid());
storage.writeAll();
@ -3166,7 +3166,7 @@ public DNConf getDnConf() {
}
public String getDatanodeUuid() {
// NOTE(review): commit view — the next line is the REMOVED (pre-patch) version.
// It read the UUID from the DatanodeID, which is presumably not yet populated
// when JMX queries arrive, so the counter always reported null (HDFS-8211).
return id == null ? null : id.getDatanodeUuid();
// ADDED line: read the UUID from storage instead — TODO confirm storage is
// initialized before JMX can observe this value.
return storage == null ? null : storage.getDatanodeUuid();
}
boolean shouldRun() {

View File

@ -0,0 +1,65 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

import org.junit.Test;

import java.net.InetSocketAddress;
import java.util.ArrayList;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
public class TestDataNodeUUID {

  /**
   * Makes sure that the DataNode reports a valid (non-null) UUID after
   * {@code DataNode#checkDatanodeUuid()} has run, and a null UUID before.
   * This guards against the HDFS-8211 regression where the JMX counter
   * always saw a null DataNode UUID.
   */
  @Test
  public void testDatanodeUuid() throws Exception {
    final InetSocketAddress NN_ADDR = new InetSocketAddress(
        "localhost", 5020);
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
    FileSystem.setDefaultUri(conf,
        "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
    ArrayList<StorageLocation> locations = new ArrayList<>();
    DataNode dn = new DataNode(conf, locations, null);

    // Before initialization the DataNode has no UUID assigned.
    assertNull("Datanode UUID should be null before initialization",
        dn.getDatanodeUuid());

    // checkDatanodeUuid() generates and persists a UUID when none exists.
    dn.checkDatanodeUuid();

    // Make sure that we have a valid DataNode UUID at that point of time.
    assertNotNull("Datanode UUID should be set after checkDatanodeUuid()",
        dn.getDatanodeUuid());
  }
}