HBASE-26899 Run spotless:apply

Closes #4312
Duo Zhang 2022-05-01 22:15:04 +08:00
parent 0edecbf9e0
commit 9c8c9e7fbf
4645 changed files with 110099 additions and 131240 deletions

View File

@ -106,5 +106,3 @@ else
echo "No command specified" >&2
exit 1
fi

View File

@ -33,4 +33,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:
export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}"
ulimit -n

View File

@ -1,4 +1,4 @@
<?xml version="1.0"?>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
@ -21,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>

View File

@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the client. This tests the hbase-client package and all of the client
* tests in hbase-server.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to coprocessors.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as failing commonly on public build infrastructure.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and
* the like.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,23 +15,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as 'integration/system' test, meaning that the test class has the following
* characteristics:
* <ul>
* <li> Possibly takes hours to complete</li>
* <li> Can be run on a mini cluster or an actual cluster</li>
* <li> Can make changes to the given cluster (starting stopping daemons, etc)</li>
* <li> Should not be run in parallel of other integration tests</li>
* <li>Possibly takes hours to complete</li>
* <li>Can be run on a mini cluster or an actual cluster</li>
* <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
* <li>Should not be run in parallel of other integration tests</li>
* </ul>
*
* Integration / System tests should have a class name starting with "IntegrationTest", and
* should be annotated with @Category(IntegrationTests.class). Integration tests can be run
* using the IntegrationTestsDriver class or from mvn verify.
*
* Integration / System tests should have a class name starting with "IntegrationTest", and should
* be annotated with @Category(IntegrationTests.class). Integration tests can be run using the
* IntegrationTestsDriver class or from mvn verify.
* @see SmallTests
* @see MediumTests
* @see LargeTests
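
As a quick illustration of the convention described above (the class name here is hypothetical), an integration test carries the "IntegrationTest" name prefix and the JUnit @Category annotation referencing IntegrationTests.class, so it can be picked up by the IntegrationTestsDriver class or by mvn verify:

import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(IntegrationTests.class)
public class IntegrationTestExampleWorkload { // hypothetical class name

  @Test
  public void testLongRunningWorkload() throws Exception {
    // long-running verification against a mini cluster or a real cluster goes here
  }
}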

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,21 +15,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tagging a test as 'large', means that the test class has the following characteristics:
* <ul>
* <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
* same machine simultaneously so be careful two concurrent tests end up fighting over ports
* or other singular resources).</li>
* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it
* has, will run in last less than three minutes</li>
* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
* other singular resources).</li>
* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it has,
* will run in last less than three minutes</li>
* <li>No large test can take longer than ten minutes; it will be killed. See 'Integration Tests'
* if you need to run tests longer than this.</li>
* </ul>
*
* @see SmallTests
* @see MediumTests
* @see IntegrationTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to mapred or mapreduce.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the master.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,21 +15,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tagging a test as 'medium' means that the test class has the following characteristics:
* <ul>
* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on
* the same machine simultaneously so be careful two concurrent tests end up fighting over ports
* or other singular resources).</li>
* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on the
* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
* other singular resources).</li>
* <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
* has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
* </ul>
*
* Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster.
*
* @see SmallTests
* @see LargeTests
* @see IntegrationTests
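
Since medium tests are the ones allowed to start up a cluster, a minimal sketch of that pattern may help (the test class name is hypothetical, and it assumes the HBaseTestingUtil mini-cluster helpers used elsewhere in this commit, e.g. in TestHelloHBase):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestExampleWithMiniCluster { // hypothetical test class

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestExampleWithMiniCluster.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // medium tests may start a mini cluster
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomethingAgainstCluster() throws Exception {
    // exercise the mini cluster here; keep total runtime under the medium-test budget
  }
}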

View File

@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as not easily falling into any of the below categories.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to RPC.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the regionserver.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to replication.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the REST capability of HBase.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to security.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -21,13 +21,13 @@ package org.apache.hadoop.hbase.testclassification;
* Tagging a test as 'small' means that the test class has the following characteristics:
* <ul>
* <li>it can be run simultaneously with other small tests all in the same JVM</li>
* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test
* methods it has, should take less than 15 seconds to complete</li>
* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test methods
* it has, should take less than 15 seconds to complete</li>
* <li>it does not use a cluster</li>
* </ul>
*
* @see MediumTests
* @see LargeTests
* @see IntegrationTests
*/
public interface SmallTests {}
public interface SmallTests {
}
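
For reference, HBase test classes typically pair one of the component categories listed in these marker interfaces (ClientTests, CoprocessorTests, and so on) with a size category such as SmallTests; a minimal sketch with a hypothetical class name:

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ ClientTests.class, SmallTests.class })
public class TestExampleSmall { // hypothetical: no cluster, completes in seconds

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestExampleSmall.class);

  @Test
  public void testInJvmLogicOnly() {
    // fast in-JVM assertions only; no mini cluster is started
  }
}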

View File

@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build
* infrastructure.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as region tests which takes longer than 5 minutes to run on public build
* infrastructure.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**

View File

@ -1,6 +1,5 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@ -23,8 +22,8 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
@ -58,10 +57,10 @@
further using xml-maven-plugin for xslt transformation, below. -->
<execution>
<id>hbase-client__copy-src-to-build-archetype-subdir</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir}</outputDirectory>
<resources>
@ -76,16 +75,17 @@
</execution>
<execution>
<id>hbase-client__copy-pom-to-temp-for-xslt-processing</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir}</outputDirectory>
<resources>
<resource>
<directory>/${project.basedir}/../${hbase-client.dir}</directory>
<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
<filtering>true</filtering>
<!-- filtering replaces ${project.version} with literal -->
<includes>
<include>pom.xml</include>
</includes>
@ -95,10 +95,10 @@
</execution>
<execution>
<id>hbase-shaded-client__copy-src-to-build-archetype-subdir</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir}</outputDirectory>
<resources>
@ -113,16 +113,17 @@
</execution>
<execution>
<id>hbase-shaded-client__copy-pom-to-temp-for-xslt-processing</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir}</outputDirectory>
<resources>
<resource>
<directory>/${project.basedir}/../${hbase-shaded-client.dir}</directory>
<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
<filtering>true</filtering>
<!-- filtering replaces ${project.version} with literal -->
<includes>
<include>pom.xml</include>
</includes>
@ -137,10 +138,10 @@
using xml-maven-plugin for xslt transformation, below. -->
<execution>
<id>hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir}</outputDirectory>
<resources>
@ -155,10 +156,10 @@
</execution>
<execution>
<id>hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir}</outputDirectory>
<resources>
@ -182,10 +183,10 @@
<!-- xml-maven-plugin modifies each exemplar project's pom.xml file to convert to standalone project. -->
<execution>
<id>modify-exemplar-pom-files-via-xslt</id>
<phase>process-resources</phase>
<goals>
<goal>transform</goal>
</goals>
<phase>process-resources</phase>
<configuration>
<transformationSets>
<transformationSet>
@ -212,10 +213,10 @@
prevent warnings when project is generated from archetype. -->
<execution>
<id>modify-archetype-pom-files-via-xslt</id>
<phase>package</phase>
<goals>
<goal>transform</goal>
</goals>
<phase>package</phase>
<configuration>
<transformationSets>
<transformationSet>
@ -247,25 +248,25 @@
<!-- exec-maven-plugin executes chmod to make scripts executable -->
<execution>
<id>make-scripts-executable</id>
<phase>process-resources</phase>
<goals>
<goal>run</goal>
</goals>
<phase>process-resources</phase>
<configuration>
<chmod file="${project.basedir}/createArchetypes.sh" perm="+x" />
<chmod file="${project.basedir}/installArchetypes.sh" perm="+x" />
<chmod file="${project.basedir}/createArchetypes.sh" perm="+x"/>
<chmod file="${project.basedir}/installArchetypes.sh" perm="+x"/>
</configuration>
</execution>
<!-- exec-maven-plugin executes script which invokes 'archetype:create-from-project'
to derive archetypes from exemplar projects. -->
<execution>
<id>run-createArchetypes-script</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<phase>compile</phase>
<configuration>
<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
<arg line="./createArchetypes.sh"/>
</exec>
</configuration>
@ -277,12 +278,12 @@
which does test generation of a project based on the archetype. -->
<execution>
<id>run-installArchetypes-script</id>
<phase>install</phase>
<goals>
<goal>run</goal>
</goals>
<phase>install</phase>
<configuration>
<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
<arg line="./installArchetypes.sh"/>
</exec>
</configuration>

View File

@ -1,8 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0"
xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation=
"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@ -24,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Successful running of this application requires access to an active instance
* of HBase. For install instructions for a standalone instance of HBase, please
* refer to https://hbase.apache.org/book.html#quickstart
* Successful running of this application requires access to an active instance of HBase. For
* install instructions for a standalone instance of HBase, please refer to
* https://hbase.apache.org/book.html#quickstart
*/
public final class HelloHBase {
protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
static final byte[] MY_FIRST_COLUMN_QUALIFIER
= Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER
= Bytes.toBytes("mySecondColumn");
static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
// Private constructor included here to avoid checkstyle warnings
@ -61,15 +58,15 @@ public final class HelloHBase {
final boolean deleteAllAtEOJ = true;
/**
* ConnectionFactory#createConnection() automatically looks for
* hbase-site.xml (HBase configuration parameters) on the system's
* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
* HBase via ZooKeeper.
*/
try (Connection connection = ConnectionFactory.createConnection();
Admin admin = connection.getAdmin()) {
admin.getClusterMetrics(); // assure connection successfully established
System.out.println("\n*** Hello HBase! -- Connection has been "
+ "established via ZooKeeper!!\n");
System.out
.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
createNamespaceAndTable(admin);
@ -92,9 +89,8 @@ public final class HelloHBase {
}
/**
* Invokes Admin#createNamespace and Admin#createTable to create a namespace
* with a table that has one column-family.
*
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
* one column-family.
* @param admin Standard Admin object
* @throws IOException If IO problem encountered
*/
@ -103,48 +99,38 @@ public final class HelloHBase {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
admin.createNamespace(NamespaceDescriptor
.create(MY_NAMESPACE_NAME).build());
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
+ "], with one Column Family ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
+ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME))
.build();
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build();
admin.createTable(desc);
}
}
/**
* Invokes Table#put to store a row (with two new columns created 'on the
* fly') into the table.
*
* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
* @param table Standard Table object (used for CRUD operations).
* @throws IOException If IO problem encountered
*/
static void putRowToTable(final Table table) throws IOException {
table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
MY_SECOND_COLUMN_QUALIFIER,
Bytes.toBytes("World!")));
table.put(new Put(MY_ROW_ID)
.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
+ "] was put into Table ["
System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
+ table.getName().getNameAsString() + "] in HBase;\n"
+ " the row's two columns (created 'on the fly') are: ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
}
/**
* Invokes Table#get and prints out the contents of the retrieved row.
*
* @param table Standard Table object
* @throws IOException If IO problem encountered
*/
@ -152,20 +138,16 @@ public final class HelloHBase {
Result row = table.get(new Get(MY_ROW_ID));
System.out.println("Row [" + Bytes.toString(row.getRow())
+ "] was retrieved from Table ["
+ table.getName().getNameAsString()
+ "] in HBase, with the following content:");
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
+ table.getName().getNameAsString() + "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
: row.getNoVersionMap().entrySet()) {
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
.entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println(" Columns in Column Family [" + columnFamilyName
+ "]:");
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap
: colFamilyEntry.getValue().entrySet()) {
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
System.out.println(" Value of Column [" + columnFamilyName + ":"
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
@ -176,14 +158,12 @@ public final class HelloHBase {
/**
* Checks to see whether a namespace exists.
*
* @param admin Standard Admin object
* @param namespaceName Name of namespace
* @return true If namespace exists
* @throws IOException If IO problem encountered
*/
static boolean namespaceExists(final Admin admin, final String namespaceName)
throws IOException {
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
try {
admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) {
@ -194,28 +174,24 @@ public final class HelloHBase {
/**
* Invokes Table#delete to delete test data (i.e. the row)
*
* @param table Standard Table object
* @throws IOException If IO problem is encountered
*/
static void deleteRow(final Table table) throws IOException {
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
+ "] from Table ["
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
+ table.getName().getNameAsString() + "].");
table.delete(new Delete(MY_ROW_ID));
}
/**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
* disable/delete Table and delete Namespace.
*
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* Table and delete Namespace.
* @param admin Standard Admin object
* @throws IOException If IO problem is encountered
*/
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Disabling/deleting Table ["
+ MY_TABLE_NAME.getNameAsString() + "].");
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME);
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -46,8 +46,7 @@ public class TestHelloHBase {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHelloHBase.class);
private static final HBaseTestingUtil TEST_UTIL
= new HBaseTestingUtil();
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
@BeforeClass
public static void beforeClass() throws Exception {
@ -67,13 +66,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
assertEquals("#namespaceExists failed: found nonexistent namespace.",
false, exists);
assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
assertEquals("#namespaceExists failed: did NOT find existing namespace.",
true, exists);
assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
admin.deleteNamespace(EXISTING_NAMESPACE);
}
@ -82,14 +79,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
HelloHBase.createNamespaceAndTable(admin);
boolean namespaceExists
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.",
true, namespaceExists);
boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
assertEquals("#createNamespaceAndTable failed to create table.",
true, tableExists);
assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
admin.disableTable(HelloHBase.MY_TABLE_NAME);
admin.deleteTable(HelloHBase.MY_TABLE_NAME);
@ -100,8 +94,7 @@ public class TestHelloHBase {
public void testPutRowToTable() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
HelloHBase.putRowToTable(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
@ -115,13 +108,10 @@ public class TestHelloHBase {
public void testDeleteRow() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
table.put(new Put(HelloHBase.MY_ROW_ID).
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("xyz")));
table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
HelloHBase.deleteRow(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());

View File

@ -1,8 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0"
xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation=
"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@ -24,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Successful running of this application requires access to an active instance
* of HBase. For install instructions for a standalone instance of HBase, please
* refer to https://hbase.apache.org/book.html#quickstart
* Successful running of this application requires access to an active instance of HBase. For
* install instructions for a standalone instance of HBase, please refer to
* https://hbase.apache.org/book.html#quickstart
*/
public final class HelloHBase {
protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
static final byte[] MY_FIRST_COLUMN_QUALIFIER
= Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER
= Bytes.toBytes("mySecondColumn");
static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
// Private constructor included here to avoid checkstyle warnings
@ -60,15 +57,15 @@ public final class HelloHBase {
final boolean deleteAllAtEOJ = true;
/**
* ConnectionFactory#createConnection() automatically looks for
* hbase-site.xml (HBase configuration parameters) on the system's
* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
* HBase via ZooKeeper.
*/
try (Connection connection = ConnectionFactory.createConnection();
Admin admin = connection.getAdmin()) {
admin.getClusterMetrics(); // assure connection successfully established
System.out.println("\n*** Hello HBase! -- Connection has been "
+ "established via ZooKeeper!!\n");
System.out
.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
createNamespaceAndTable(admin);
@ -91,9 +88,8 @@ public final class HelloHBase {
}
/**
* Invokes Admin#createNamespace and Admin#createTable to create a namespace
* with a table that has one column-family.
*
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
* one column-family.
* @param admin Standard Admin object
* @throws IOException If IO problem encountered
*/
@ -102,13 +98,11 @@ public final class HelloHBase {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
admin.createNamespace(NamespaceDescriptor
.create(MY_NAMESPACE_NAME).build());
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
+ "], with one Column Family ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
+ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build());
@ -116,33 +110,26 @@ public final class HelloHBase {
}
/**
* Invokes Table#put to store a row (with two new columns created 'on the
* fly') into the table.
*
* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
* @param table Standard Table object (used for CRUD operations).
* @throws IOException If IO problem encountered
*/
static void putRowToTable(final Table table) throws IOException {
table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
MY_SECOND_COLUMN_QUALIFIER,
Bytes.toBytes("World!")));
table.put(new Put(MY_ROW_ID)
.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
+ "] was put into Table ["
System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
+ table.getName().getNameAsString() + "] in HBase;\n"
+ " the row's two columns (created 'on the fly') are: ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
}
/**
* Invokes Table#get and prints out the contents of the retrieved row.
*
* @param table Standard Table object
* @throws IOException If IO problem encountered
*/
@ -150,20 +137,16 @@ public final class HelloHBase {
Result row = table.get(new Get(MY_ROW_ID));
System.out.println("Row [" + Bytes.toString(row.getRow())
+ "] was retrieved from Table ["
+ table.getName().getNameAsString()
+ "] in HBase, with the following content:");
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
+ table.getName().getNameAsString() + "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
: row.getNoVersionMap().entrySet()) {
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
.entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println(" Columns in Column Family [" + columnFamilyName
+ "]:");
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap
: colFamilyEntry.getValue().entrySet()) {
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
System.out.println(" Value of Column [" + columnFamilyName + ":"
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
@ -174,14 +157,12 @@ public final class HelloHBase {
/**
* Checks to see whether a namespace exists.
*
* @param admin Standard Admin object
* @param namespaceName Name of namespace
* @return true If namespace exists
* @throws IOException If IO problem encountered
*/
static boolean namespaceExists(final Admin admin, final String namespaceName)
throws IOException {
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
try {
admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) {
@ -192,28 +173,24 @@ public final class HelloHBase {
/**
* Invokes Table#delete to delete test data (i.e. the row)
*
* @param table Standard Table object
* @throws IOException If IO problem is encountered
*/
static void deleteRow(final Table table) throws IOException {
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
+ "] from Table ["
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
+ table.getName().getNameAsString() + "].");
table.delete(new Delete(MY_ROW_ID));
}
/**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
* disable/delete Table and delete Namespace.
*
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* Table and delete Namespace.
* @param admin Standard Admin object
* @throws IOException If IO problem is encountered
*/
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Disabling/deleting Table ["
+ MY_TABLE_NAME.getNameAsString() + "].");
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME);
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -46,8 +46,7 @@ public class TestHelloHBase {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHelloHBase.class);
private static final HBaseTestingUtil TEST_UTIL
= new HBaseTestingUtil();
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
@BeforeClass
public static void beforeClass() throws Exception {
@ -67,13 +66,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
assertEquals("#namespaceExists failed: found nonexistent namespace.",
false, exists);
assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
assertEquals("#namespaceExists failed: did NOT find existing namespace.",
true, exists);
assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
admin.deleteNamespace(EXISTING_NAMESPACE);
}
@ -82,14 +79,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
HelloHBase.createNamespaceAndTable(admin);
boolean namespaceExists
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.",
true, namespaceExists);
boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
assertEquals("#createNamespaceAndTable failed to create table.",
true, tableExists);
assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
admin.disableTable(HelloHBase.MY_TABLE_NAME);
admin.deleteTable(HelloHBase.MY_TABLE_NAME);
@ -100,8 +94,7 @@ public class TestHelloHBase {
public void testPutRowToTable() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
HelloHBase.putRowToTable(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
@ -115,13 +108,10 @@ public class TestHelloHBase {
public void testDeleteRow() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
table.put(new Put(HelloHBase.MY_ROW_ID).
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("xyz")));
table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
HelloHBase.deleteRow(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());

View File

@ -1,6 +1,5 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@ -22,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
@ -68,10 +67,10 @@
<artifactId>spotbugs-maven-plugin</artifactId>
<executions>
<execution>
<inherited>false</inherited>
<goals>
<goal>spotbugs</goal>
</goals>
<inherited>false</inherited>
<configuration>
<excludeFilterFile>${project.basedir}/../dev-support/spotbugs-exclude.xml</excludeFilterFile>
</configuration>

View File

@ -1,4 +1,4 @@
<?xml version="1.0"?>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
@ -21,160 +21,18 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
<artifactId>hbase-assembly</artifactId>
<name>Apache HBase - Assembly</name>
<description>
Module that does project assembly and that is all that it does.
</description>
<packaging>pom</packaging>
<name>Apache HBase - Assembly</name>
<description>Module that does project assembly and that is all that it does.</description>
<properties>
<license.bundles.dependencies>true</license.bundles.dependencies>
</properties>
<build>
<plugins>
<!-- licensing info from our dependencies -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-remote-resources-plugin</artifactId>
<executions>
<execution>
<id>aggregate-licenses</id>
<goals>
<goal>process</goal>
</goals>
<configuration>
<properties>
<copyright-end-year>${build.year}</copyright-end-year>
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
<bundled-vega>${license.bundles.vega}</bundled-vega>
<bundled-logo>${license.bundles.logo}</bundled-logo>
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
</properties>
<resourceBundles>
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
</resourceBundles>
<supplementalModelArtifacts>
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
</supplementalModelArtifacts>
<supplementalModels>
<supplementalModel>supplemental-models.xml</supplementalModel>
</supplementalModels>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<!--Else will use hbase-assembly as final name.-->
<finalName>hbase-${project.version}</finalName>
<skipAssembly>false</skipAssembly>
<appendAssemblyId>true</appendAssemblyId>
<tarLongFileMode>posix</tarLongFileMode>
<descriptors>
<descriptor>${assembly.file}</descriptor>
<descriptor>src/main/assembly/client.xml</descriptor>
</descriptors>
</configuration>
</plugin>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
<id>create-hbase-generated-classpath</id>
<phase>test</phase>
<goals>
<goal>build-classpath</goal>
</goals>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
<id>create-hbase-generated-classpath-jline</id>
<phase>test</phase>
<goals>
<goal>build-classpath</goal>
</goals>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
<includeArtifactIds>jline</includeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
<id>create-hbase-generated-classpath-jruby</id>
<phase>test</phase>
<goals>
<goal>build-classpath</goal>
</goals>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
<includeArtifactIds>jruby-complete</includeArtifactIds>
</configuration>
</execution>
<!--
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
If MASSEMBLY-382 is fixed we could do this in the assembly
Currently relies on env, bash, find, and cat.
-->
<execution>
<!-- put all of the NOTICE files out of our dependencies -->
<id>unpack-dependency-notices</id>
<phase>prepare-package</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<configuration>
<excludeTypes>pom</excludeTypes>
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>${exec.maven.version}</version>
<executions>
<execution>
<id>concat-NOTICE-files</id>
<phase>package</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>env</executable>
<arguments>
<argument>bash</argument>
<argument>-c</argument>
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`
</argument>
</arguments>
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
<workingDirectory>${project.build.directory}</workingDirectory>
</configuration>
</execution>
</executions>
</plugin>
<!-- /end building aggregation of NOTICE files -->
</plugins>
</build>
<dependencies>
<!-- client artifacts for downstream use -->
<dependency>
@ -390,4 +248,143 @@
<scope>compile</scope>
</dependency>
</dependencies>
<build>
<plugins>
<!-- licensing info from our dependencies -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-remote-resources-plugin</artifactId>
<executions>
<execution>
<id>aggregate-licenses</id>
<goals>
<goal>process</goal>
</goals>
<configuration>
<properties>
<copyright-end-year>${build.year}</copyright-end-year>
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
<bundled-vega>${license.bundles.vega}</bundled-vega>
<bundled-logo>${license.bundles.logo}</bundled-logo>
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
</properties>
<resourceBundles>
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
</resourceBundles>
<supplementalModelArtifacts>
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
</supplementalModelArtifacts>
<supplementalModels>
<supplementalModel>supplemental-models.xml</supplementalModel>
</supplementalModels>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<!--Else will use hbase-assembly as final name.-->
<finalName>hbase-${project.version}</finalName>
<skipAssembly>false</skipAssembly>
<appendAssemblyId>true</appendAssemblyId>
<tarLongFileMode>posix</tarLongFileMode>
<descriptors>
<descriptor>${assembly.file}</descriptor>
<descriptor>src/main/assembly/client.xml</descriptor>
</descriptors>
</configuration>
</plugin>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
<id>create-hbase-generated-classpath</id>
<goals>
<goal>build-classpath</goal>
</goals>
<phase>test</phase>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
<id>create-hbase-generated-classpath-jline</id>
<goals>
<goal>build-classpath</goal>
</goals>
<phase>test</phase>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
<includeArtifactIds>jline</includeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
<id>create-hbase-generated-classpath-jruby</id>
<goals>
<goal>build-classpath</goal>
</goals>
<phase>test</phase>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
<includeArtifactIds>jruby-complete</includeArtifactIds>
</configuration>
</execution>
<!--
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
If MASSEMBLY-382 is fixed we could do this in the assembly
Currently relies on env, bash, find, and cat.
-->
<execution>
<!-- put all of the NOTICE files out of our dependencies -->
<id>unpack-dependency-notices</id>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<excludeTypes>pom</excludeTypes>
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>${exec.maven.version}</version>
<executions>
<execution>
<id>concat-NOTICE-files</id>
<goals>
<goal>exec</goal>
</goals>
<phase>package</phase>
<configuration>
<executable>env</executable>
<arguments>
<argument>bash</argument>
<argument>-c</argument>
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`</argument>
</arguments>
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
<workingDirectory>${project.build.directory}</workingDirectory>
</configuration>
</execution>
</executions>
</plugin>
<!-- /end building aggregation of NOTICE files -->
</plugins>
</build>
</project>

View File

@ -1,6 +1,5 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@ -22,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
@ -31,33 +30,6 @@
<artifactId>hbase-asyncfs</artifactId>
<name>Apache HBase - Asynchronous FileSystem</name>
<description>HBase Asynchronous FileSystem Implementation for WAL</description>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
@ -169,13 +141,42 @@
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<!-- Profiles for building against different hadoop versions -->
<profile>
<id>hadoop-3.0</id>
<activation>
<property><name>!hadoop.profile</name></property>
<property>
<name>!hadoop.profile</name>
</property>
</activation>
<dependencies>
<dependency>
@ -224,8 +225,7 @@
<artifactId>lifecycle-mapping</artifactId>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
</pluginExecutions>
<pluginExecutions/>
</lifecycleMappingMetadata>
</configuration>
</plugin>

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -21,10 +21,9 @@ import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Interface for asynchronous filesystem output stream.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -180,7 +180,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
// State for connections to DN
private enum State {
STREAMING, CLOSING, BROKEN, CLOSED
STREAMING,
CLOSING,
BROKEN,
CLOSED
}
private volatile State state;
@ -284,13 +287,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
Status reply = getStatus(ack);
if (reply != Status.SUCCESS) {
failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " +
block + " from datanode " + ctx.channel().remoteAddress()));
failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block
+ " from datanode " + ctx.channel().remoteAddress()));
return;
}
if (PipelineAck.isRestartOOBStatus(reply)) {
failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " +
block + " from datanode " + ctx.channel().remoteAddress()));
failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block "
+ block + " from datanode " + ctx.channel().remoteAddress()));
return;
}
if (ack.getSeqno() == HEART_BEAT_SEQNO) {
@ -345,10 +348,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
}
}
FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs,
DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
LocatedBlock locatedBlock, Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap,
DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client,
ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock,
Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap, DataChecksum summer,
ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
this.conf = conf;
this.dfs = dfs;
this.client = client;
@ -418,8 +421,8 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
ByteBuf headerBuf = alloc.buffer(headerLen);
header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
headerBuf.writerIndex(headerLen);
Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen,
datanodeInfoMap.keySet(), dataLen);
Callback c =
new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen);
waitingAckQueue.addLast(c);
// recheck again after we pushed the callback to queue
if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -145,9 +145,8 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
// helper class for creating files.
private interface FileCreator {
default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent,
short replication, long blockSize, CryptoProtocolVersion[] supportedVersions)
throws Exception {
String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication,
long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception {
try {
return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent,
replication, blockSize, supportedVersions);
@ -249,9 +248,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
LEASE_MANAGER = createLeaseManager();
FILE_CREATOR = createFileCreator();
} catch (Exception e) {
String msg = "Couldn't properly initialize access to HDFS internals. Please " +
"update your WAL Provider to not make use of the 'asyncfs' provider. See " +
"HBASE-16110 for more information.";
String msg = "Couldn't properly initialize access to HDFS internals. Please "
+ "update your WAL Provider to not make use of the 'asyncfs' provider. See "
+ "HBASE-16110 for more information.";
LOG.error(msg, e);
throw new Error(msg, e);
}
@ -298,11 +297,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
if (resp.getStatus() != Status.SUCCESS) {
if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
throw new InvalidBlockTokenException("Got access token error" + ", status message " +
resp.getMessage() + ", " + logInfo);
throw new InvalidBlockTokenException("Got access token error" + ", status message "
+ resp.getMessage() + ", " + logInfo);
} else {
throw new IOException("Got error" + ", status=" + resp.getStatus().name() +
", status message " + resp.getMessage() + ", " + logInfo);
throw new IOException("Got error" + ", status=" + resp.getStatus().name()
+ ", status message " + resp.getMessage() + ", " + logInfo);
}
}
// success
@ -392,11 +391,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
.setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
.setClientName(clientName).build();
ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
.setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
.setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
.setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
.setRequestedChecksum(checksumProto)
OpWriteBlockProto.Builder writeBlockProtoBuilder =
OpWriteBlockProto.newBuilder().setHeader(header)
.setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
.setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
.setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
.setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
for (int i = 0; i < datanodeInfos.length; i++) {
@ -454,14 +453,14 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
boolean overwrite, boolean createParent, short replication, long blockSize,
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
StreamSlowMonitor monitor) throws IOException {
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
throws IOException {
Configuration conf = dfs.getConf();
DFSClient client = dfs.getClient();
String clientName = client.getClientName();
ClientProtocol namenode = client.getNamenode();
int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES,
DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
int createMaxRetries =
conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
Set<DatanodeInfo> toExcludeNodes =
new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -240,8 +240,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
try {
return createTransparentCryptoHelperWithoutHDFS12396();
} catch (NoSuchMethodException e) {
LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," +
" should be hadoop version with HDFS-12396", e);
LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient,"
+ " should be hadoop version with HDFS-12396", e);
}
return createTransparentCryptoHelperWithHDFS12396();
}
@ -324,8 +324,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private int step = 0;
public SaslNegotiateHandler(Configuration conf, String username, char[] password,
Map<String, String> saslProps, int timeoutMs, Promise<Void> promise,
DFSClient dfsClient) throws SaslException {
Map<String, String> saslProps, int timeoutMs, Promise<Void> promise, DFSClient dfsClient)
throws SaslException {
this.conf = conf;
this.saslProps = saslProps;
this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL,
@ -355,8 +355,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
}
/**
* The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty.
* After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
* The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After
* Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
* Use Reflection to check which ones to use.
*/
private static class BuilderPayloadSetter {
@ -366,13 +366,11 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
/**
* Create a ByteString from byte array without copying (wrap), and then set it as the payload
* for the builder.
*
* @param builder builder for HDFS DataTransferEncryptorMessage.
* @param payload byte array of payload.
* @throws IOException
* @param payload byte array of payload. n
*/
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload)
throws IOException {
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
byte[] payload) throws IOException {
Object byteStringObject;
try {
// byteStringObject = new LiteralByteString(payload);
@ -396,18 +394,18 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
try {
// See if it can load the relocated ByteString, which comes from hadoop-thirdparty.
byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
LOG.debug("Found relocated ByteString class from hadoop-thirdparty." +
" Assuming this is Hadoop 3.3.0+.");
LOG.debug("Found relocated ByteString class from hadoop-thirdparty."
+ " Assuming this is Hadoop 3.3.0+.");
} catch (ClassNotFoundException e) {
LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." +
" Assuming this is below Hadoop 3.3.0", e);
LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty."
+ " Assuming this is below Hadoop 3.3.0", e);
}
// LiteralByteString is a package private class in protobuf. Make it accessible.
Class<?> literalByteStringClass;
try {
literalByteStringClass = Class.forName(
"org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
literalByteStringClass =
Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found.");
} catch (ClassNotFoundException e) {
try {
@ -502,8 +500,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
if (!requestedQop.contains(negotiatedQop)) {
throw new IOException(String.format("SASL handshake completed, but "
+ "channel does not have acceptable quality of protection, "
+ "requested = %s, negotiated = %s",
requestedQop, negotiatedQop));
+ "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
}
}
@ -805,8 +802,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
}
doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
encryptionKeyToPassword(encryptionKey.encryptionKey),
createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise,
client);
createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client);
} else if (!UserGroupInformation.isSecurityEnabled()) {
if (LOG.isDebugEnabled()) {
LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -17,33 +17,29 @@
*/
package org.apache.hadoop.hbase.io.asyncfs;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder;
import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
/**
* Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder.
* The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf).
*
* Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and
* so we must use reflection to detect which one (relocated or not) to use.
*
* Do not use this to process HBase's shaded protobuf messages. This is meant to process the
* protobuf messages in HDFS for the asyncfs use case.
* */
* Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode
* supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates
* protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect
* which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages.
* This is meant to process the protobuf messages in HDFS for the asyncfs use case.
*/
@InterfaceAudience.Private
public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
private static final Logger LOG =
LoggerFactory.getLogger(ProtobufDecoder.class);
private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class);
private static Class<?> protobufMessageLiteClass = null;
private static Class<?> protobufMessageLiteBuilderClass = null;
@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
private Object parser;
private Object builder;
public ProtobufDecoder(Object prototype) {
try {
Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod(
"getDefaultInstanceForType");
Object prototype1 = getDefaultInstanceForTypeMethod
.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));
Method getDefaultInstanceForTypeMethod =
protobufMessageLiteClass.getMethod("getDefaultInstanceForType");
Object prototype1 =
getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));
// parser = prototype.getParserForType()
parser = getParserForTypeMethod.invoke(prototype1);
parseFromMethod = parser.getClass().getMethod(
"parseFrom", byte[].class, int.class, int.class);
parseFromMethod =
parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class);
// builder = prototype.newBuilderForType();
builder = newBuilderForTypeMethod.invoke(prototype1);
mergeFromMethod = builder.getClass().getMethod(
"mergeFrom", byte[].class, int.class, int.class);
mergeFromMethod =
builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class);
// All protobuf message builders inherits from MessageLite.Builder
buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build");
@ -88,8 +83,7 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
}
}
protected void decode(
ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
int length = msg.readableBytes();
byte[] array;
int offset;
@ -122,8 +116,8 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
try {
protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
protobufMessageLiteBuilderClass = Class.forName(
"org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
protobufMessageLiteBuilderClass =
Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
LOG.debug("Hadoop 3.3 and above shades protobuf.");
} catch (ClassNotFoundException e) {
LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e);

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -22,7 +22,6 @@ import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@ -95,8 +94,8 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
}
long pos = out.getPos();
/**
* This flush0 method could only be called by single thread, so here we could
* safely overwrite without any synchronization.
* This flush0 method could only be called by single thread, so here we could safely overwrite
* without any synchronization.
*/
this.syncedLength = pos;
future.complete(pos);

View File

@ -56,18 +56,17 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
private final int maxExcludeDNCount;
private final Configuration conf;
// This is a map of providerId->StreamSlowMonitor
private final Map<String, StreamSlowMonitor> streamSlowMonitors =
new ConcurrentHashMap<>(1);
private final Map<String, StreamSlowMonitor> streamSlowMonitors = new ConcurrentHashMap<>(1);
public ExcludeDatanodeManager(Configuration conf) {
this.conf = conf;
this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT);
this.excludeDNsCache = CacheBuilder.newBuilder()
.expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY,
DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS)
.maximumSize(this.maxExcludeDNCount)
.build();
.expireAfterWrite(
this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
TimeUnit.HOURS)
.maximumSize(this.maxExcludeDNCount).build();
}
/**
@ -85,15 +84,15 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
datanodeInfo, cause, excludeDNsCache.size());
return true;
}
LOG.debug("Try add datanode {} to exclude cache by [{}] failed, "
+ "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet());
LOG.debug(
"Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}",
datanodeInfo, cause, getExcludeDNs().keySet());
return false;
}
public StreamSlowMonitor getStreamSlowMonitor(String name) {
String key = name == null || name.isEmpty() ? "defaultMonitorName" : name;
return streamSlowMonitors
.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
}
public Map<DatanodeInfo, Long> getExcludeDNs() {
@ -105,10 +104,12 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
for (StreamSlowMonitor monitor : streamSlowMonitors.values()) {
monitor.onConfigurationChange(conf);
}
this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite(
this.excludeDNsCache = CacheBuilder.newBuilder()
.expireAfterWrite(
this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
TimeUnit.HOURS).maximumSize(this.conf
.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
TimeUnit.HOURS)
.maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
.build();
}
}
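ExcludeDatanodeManager above keeps its excluded datanodes in a cache bounded by maximumSize and expired by a configured TTL. A minimal sketch of that cache shape, using plain com.google.common Guava rather than the hbase-thirdparty relocation HBase itself uses, and with the key/value types simplified to String and Long.

import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public final class ExcludeCacheSketch {
  public static void main(String[] args) {
    long ttlHours = 6;   // stand-in for the WAL_EXCLUDE_DATANODE_TTL_KEY setting
    int maxExcluded = 3; // stand-in for the WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY setting

    Cache<String, Long> excludeDNs = CacheBuilder.newBuilder()
        .expireAfterWrite(ttlHours, TimeUnit.HOURS) // entries fall out after the TTL
        .maximumSize(maxExcluded)                   // never track more than N excluded datanodes
        .build();

    excludeDNs.put("datanode-1:9866", System.currentTimeMillis());
    System.out.println("excluded=" + excludeDNs.asMap().keySet());
  }
}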

View File

@ -38,18 +38,16 @@ import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
/**
* Class for monitor the wal file flush performance.
* Each active wal file has a StreamSlowMonitor.
* Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor.
*/
@InterfaceAudience.Private
public class StreamSlowMonitor implements ConfigurationObserver {
private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class);
/**
* Configure for the min count for a datanode detected slow.
* If a datanode is detected slow times up to this count, then it will be added to the exclude
* datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)}
* of this regionsever.
* Configure for the min count for a datanode detected slow. If a datanode is detected slow times
* up to this count, then it will be added to the exclude datanode cache by
* {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever.
*/
private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY =
"hbase.regionserver.async.wal.min.slow.detect.count";
@ -63,9 +61,9 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms
/**
* Configure for the speed check of packet min length.
* For packets whose data length smaller than this value, check slow by processing time.
* While for packets whose data length larger than this value, check slow by flushing speed.
* Configure for the speed check of packet min length. For packets whose data length smaller than
* this value, check slow by processing time. While for packets whose data length larger than this
* value, check slow by flushing speed.
*/
private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY =
"hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min";
@ -73,8 +71,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024;
/**
* Configure for the slow packet process time, a duration from send to ACK.
* The processing time check is for packets that data length smaller than
* Configure for the slow packet process time, a duration from send to ACK. The processing time
* check is for packets that data length smaller than
* {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY}
*/
public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY =
@ -112,8 +110,9 @@ public class StreamSlowMonitor implements ConfigurationObserver {
this.datanodeSlowDataQueue = CacheBuilder.newBuilder()
.maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
.expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY,
DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS)
.expireAfterWrite(
conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
TimeUnit.HOURS)
.build(new CacheLoader<DatanodeInfo, Deque<PacketAckData>>() {
@Override
public Deque<PacketAckData> load(DatanodeInfo key) throws Exception {
@ -142,17 +141,20 @@ public class StreamSlowMonitor implements ConfigurationObserver {
// 1. For small packet, we just have a simple time limit, without considering
// the size of the packet.
// 2. For large packet, we will calculate the speed, and check if the speed is too slow.
boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || (
packetDataLen > minLengthForSpeedCheck
boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
|| (packetDataLen > minLengthForSpeedCheck
&& (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
if (slow) {
// Check if large diff ack timestamp between replicas,
// should try to avoid misjudgments that caused by GC STW.
if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || (
lastAckTimestamp <= 0 && unfinished == 0)) {
LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
+ "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs,
unfinished, lastAckTimestamp, this.name);
if (
(lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2)
|| (lastAckTimestamp <= 0 && unfinished == 0)
) {
LOG.info(
"Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
+ "lastAckTimestamp={}, monitor name: {}",
datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name);
if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) {
excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack");
}
@ -168,8 +170,10 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) {
Deque<PacketAckData> slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo);
long current = EnvironmentEdgeManager.currentTime();
while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
|| slowDNQueue.size() >= minSlowDetectCount)) {
while (
!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
|| slowDNQueue.size() >= minSlowDetectCount)
) {
slowDNQueue.removeFirst();
}
slowDNQueue.addLast(new PacketAckData(dataLength, processTime));
@ -177,13 +181,13 @@ public class StreamSlowMonitor implements ConfigurationObserver {
}
private void setConf(Configuration conf) {
this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY,
DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
this.minSlowDetectCount =
conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL);
this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY,
DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME);
this.minLengthForSpeedCheck = conf.getLong(
DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
this.minLengthForSpeedCheck =
conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH);
this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY,
DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED);
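The checkProcessTimeAndSpeed code reformatted above flags a packet as slow by ack time when it is small and by flush speed when it is large. A standalone restatement of just that predicate, with the configured thresholds passed in as plain parameters and illustrative numbers in main.

public final class SlowPacketCheck {
  /**
   * @param packetDataLen          packet payload size in bytes
   * @param processTimeMs          duration from send to ACK in milliseconds
   * @param minLengthForSpeedCheck at or below this size judge by time, above it judge by speed
   * @param slowPacketAckMs        max acceptable ack time for small packets
   * @param minPacketFlushSpeedKBs min acceptable flush speed (KB/s, i.e. bytes per ms) for large packets
   */
  static boolean isSlow(long packetDataLen, long processTimeMs, long minLengthForSpeedCheck,
      long slowPacketAckMs, double minPacketFlushSpeedKBs) {
    // 1. Small packet: a simple time limit, ignoring size.
    // 2. Large packet: compute the speed and compare against the floor.
    return (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
      || (packetDataLen > minLengthForSpeedCheck
        && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
  }

  public static void main(String[] args) {
    // 100 KB acknowledged in 5.1 s is below a 20 KB/s floor, so it is flagged slow.
    System.out.println(isSlow(100_000, 5_100, 64 * 1024, 6_000, 20));
  }
}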

View File

@ -1,5 +1,4 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -21,8 +20,8 @@ package org.apache.hadoop.hbase.util;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Similar interface as {@link org.apache.hadoop.util.Progressable} but returns
* a boolean to support canceling the operation.
* Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support
* canceling the operation.
* <p/>
* Used for doing updating of OPENING znode during log replay on region open.
*/
@ -30,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface CancelableProgressable {
/**
* Report progress. Returns true if operations should continue, false if the
* operation should be canceled and rolled back.
* Report progress. Returns true if operations should continue, false if the operation should be
* canceled and rolled back.
* @return whether to continue (true) or cancel (false) the operation
*/
boolean progress();
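CancelableProgressable, as the javadoc above says, is a Progressable variant whose boolean return lets the operation be canceled. A hypothetical caller sketch showing that contract, assuming the hbase-asyncfs module is on the classpath; the step loop and the toy reporter are invented for illustration.

import org.apache.hadoop.hbase.util.CancelableProgressable;

public final class ProgressLoop {
  /** Runs a fixed number of steps, stopping early if the reporter asks for cancellation. */
  static int runSteps(int steps, CancelableProgressable reporter) {
    for (int i = 0; i < steps; i++) {
      // ... do one unit of work here ...
      if (!reporter.progress()) {
        return i; // false means cancel and roll back, per the interface contract
      }
    }
    return steps;
  }

  public static void main(String[] args) {
    // Toy reporter that cancels after two successful reports.
    final int[] calls = { 0 };
    CancelableProgressable cancelAfterTwo = () -> ++calls[0] < 3;
    System.out.println("completed steps: " + runSteps(10, cancelAfterTwo));
  }
}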

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -120,8 +120,10 @@ public final class RecoverLeaseFSUtils {
// Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
// isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
long localStartWaiting = EnvironmentEdgeManager.currentTime();
while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase *
nbAttempt) {
while (
(EnvironmentEdgeManager.currentTime() - localStartWaiting)
< subsequentPauseBase * nbAttempt
) {
Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
if (findIsFileClosedMeth) {
try {
@ -152,10 +154,10 @@ public final class RecoverLeaseFSUtils {
private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout,
final int nbAttempt, final Path p, final long startWaiting) {
if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) {
LOG.warn("Cannot recoverLease after trying for " +
conf.getInt("hbase.lease.recovery.timeout", 900000) +
"ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " +
getLogMessageDetail(nbAttempt, p, startWaiting));
LOG.warn("Cannot recoverLease after trying for "
+ conf.getInt("hbase.lease.recovery.timeout", 900000)
+ "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; "
+ getLogMessageDetail(nbAttempt, p, startWaiting));
return true;
}
return false;
@ -170,8 +172,8 @@ public final class RecoverLeaseFSUtils {
boolean recovered = false;
try {
recovered = dfs.recoverLease(p);
LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") +
getLogMessageDetail(nbAttempt, p, startWaiting));
LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ")
+ getLogMessageDetail(nbAttempt, p, startWaiting));
} catch (IOException e) {
if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
// This exception comes out instead of FNFE, fix it
@ -189,8 +191,8 @@ public final class RecoverLeaseFSUtils {
*/
private static String getLogMessageDetail(final int nbAttempt, final Path p,
final long startWaiting) {
return "attempt=" + nbAttempt + " on file=" + p + " after " +
(EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
return "attempt=" + nbAttempt + " on file=" + p + " after "
+ (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
}
/**
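The recoverLease loop reformatted above keeps retrying with a pause (hbase.lease.recovery.pause, 1000 ms by default) until the configured hbase.lease.recovery.timeout elapses. A simplified sketch of that retry shape, with the HDFS call replaced by a BooleanSupplier so it stays self-contained; the timings in main are illustrative, not the HBase defaults.

import java.util.function.BooleanSupplier;

public final class LeaseRecoveryRetry {
  /**
   * Keeps invoking the recovery attempt until it reports success or the overall timeout
   * elapses, sleeping between attempts. Mirrors the shape of RecoverLeaseFSUtils, with the
   * dfs.recoverLease(path) call abstracted away.
   */
  static boolean recoverWithRetries(BooleanSupplier attempt, long timeoutMs, long pauseMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int nbAttempt = 0;
    while (System.currentTimeMillis() < deadline) {
      nbAttempt++;
      if (attempt.getAsBoolean()) {
        System.out.println("Recovered lease, attempt=" + nbAttempt);
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + nbAttempt);
      Thread.sleep(pauseMs); // analogous to the hbase.lease.recovery.pause wait above
    }
    System.out.println("Cannot recoverLease after trying for " + timeoutMs + "ms; giving up");
    return false;
  }

  public static void main(String[] args) throws InterruptedException {
    // Toy attempt that succeeds on the third try.
    final int[] calls = { 0 };
    recoverWithRetries(() -> ++calls[0] >= 3, 10_000, 100);
  }
}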

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.asyncfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -44,18 +45,14 @@ public class TestExcludeDatanodeManager {
StreamSlowMonitor streamSlowDNsMonitor =
excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
DatanodeInfo datanodeInfo =
new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1")
.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333)
.setIpcPort(444).setNetworkLocation("location1").build();
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
.setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
.setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
System.currentTimeMillis() - 5100, 0);
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
System.currentTimeMillis() - 5100, 0);
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
System.currentTimeMillis() - 5100, 0);
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
@ -68,18 +65,14 @@ public class TestExcludeDatanodeManager {
StreamSlowMonitor streamSlowDNsMonitor =
excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
DatanodeInfo datanodeInfo =
new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1")
.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333)
.setIpcPort(444).setNetworkLocation("location1").build();
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
.setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
.setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
System.currentTimeMillis() - 7000, 0);
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
System.currentTimeMillis() - 7000, 0);
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
System.currentTimeMillis() - 7000, 0);
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -57,6 +57,7 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
@ -240,9 +241,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
StreamSlowMonitor streamSlowDNsMonitor =
excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS,
f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop,
CHANNEL_CLASS, streamSlowDNsMonitor)) {
try (FanOutOneBlockAsyncDFSOutput output =
FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3,
FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) {
// should exclude the dead dn when retry so here we only have 2 DNs in pipeline
assertEquals(2, output.getPipeline().length);
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -47,6 +47,7 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@ -69,8 +68,8 @@ public class TestRecoverLeaseFSUtils {
Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
// Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
// invocations will happen pretty fast... the we fall into the longer wait loop).
assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 *
HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
assertTrue((EnvironmentEdgeManager.currentTime() - startTime)
> (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
}
/**

View File

@ -1,4 +1,4 @@
<?xml version="1.0"?>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
@ -21,34 +21,14 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
<artifactId>hbase-backup</artifactId>
<name>Apache HBase - Backup</name>
<description>Backup for HBase</description>
<build>
<plugins>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<dependencies>
<!-- Intra-project dependencies -->
<dependency>
@ -173,12 +153,34 @@
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<profiles>
<!-- Profile for building against Hadoop 3.0.0. Activate by default -->
<profile>
<id>hadoop-3.0</id>
<activation>
<property><name>!hadoop.profile</name></property>
<property>
<name>!hadoop.profile</name>
</property>
</activation>
<dependencies>
<dependency>
@ -213,8 +215,7 @@
<artifactId>lifecycle-mapping</artifactId>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
</pluginExecutions>
<pluginExecutions/>
</lifecycleMappingMetadata>
</configuration>
</plugin>

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupSet;
import org.apache.yetus.audience.InterfaceAudience;
@ -30,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience;
* The administrative API for HBase Backup. Construct an instance and call {@link #close()}
* afterwards.
* <p>
* BackupAdmin can be used to create backups, restore data from backups and for other
* backup-related operations.
* BackupAdmin can be used to create backups, restore data from backups and for other backup-related
* operations.
* @since 2.0
*/
@InterfaceAudience.Private
@ -71,9 +69,9 @@ public interface BackupAdmin extends Closeable {
/**
* Merge backup images command
* @param backupIds array of backup ids of images to be merged
* The resulting backup image will have the same backup id as the most
* recent image from a list of images to be merged
* @param backupIds array of backup ids of images to be merged The resulting backup image will
* have the same backup id as the most recent image from a list of images to be
* merged
* @throws IOException exception
*/
void mergeBackups(String[] backupIds) throws IOException;
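The mergeBackups javadoc above notes that the merged image keeps the backup id of the most recent input. A hypothetical caller sketch; BackupAdminImpl as the concrete implementation and the two backup ids are assumptions, and a reachable cluster is required for this to do anything.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; // assumed implementation class
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class MergeBackupsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         BackupAdmin admin = new BackupAdminImpl(connection)) {
      // Hypothetical ids; the merged image keeps the id of the most recent input image.
      admin.mergeBackups(new String[] { "backup_1651400000000", "backup_1651500000000" });
    }
  }
}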

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,13 +18,11 @@
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupManager;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -58,9 +58,7 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
/**
*
* Command-line entry point for backup operation
*
*/
@InterfaceAudience.Private
public class BackupDriver extends AbstractHBaseTool {

View File

@ -23,7 +23,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
@ -54,7 +53,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
private Connection connection;
private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
//used by unit test to skip reading backup:system
// used by unit test to skip reading backup:system
private boolean checkForFullyBackedUpTables = true;
private List<TableName> fullyBackedUpTables = null;
@ -79,8 +78,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
connection = ConnectionFactory.createConnection(conf);
}
try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
Map<byte[], List<Path>>[] res =
tbl.readBulkLoadedFiles(null, tableList);
Map<byte[], List<Path>>[] res = tbl.readBulkLoadedFiles(null, tableList);
secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
return getFilenameFromBulkLoad(res);
@ -91,6 +89,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
void setCheckForFullyBackedUpTables(boolean b) {
checkForFullyBackedUpTables = b;
}
@Override
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
if (conf == null) {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
@ -59,7 +59,10 @@ public class BackupInfo implements Comparable<BackupInfo> {
* Backup session states
*/
public enum BackupState {
RUNNING, COMPLETE, FAILED, ANY
RUNNING,
COMPLETE,
FAILED,
ANY
}
/**
@ -67,7 +70,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
* BackupState.RUNNING
*/
public enum BackupPhase {
REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST
REQUEST,
SNAPSHOT,
PREPARE_INCREMENTAL,
SNAPSHOTCOPY,
INCREMENTAL_COPY,
STORE_MANIFEST
}
/**
@ -137,8 +145,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
private Map<TableName, Map<String, Long>> tableSetTimestampMap;
/**
* Previous Region server log timestamps for table set after distributed log roll key -
* table name, value - map of RegionServer hostname -> last log rolled timestamp
* Previous Region server log timestamps for table set after distributed log roll key - table
* name, value - map of RegionServer hostname -> last log rolled timestamp
*/
private Map<TableName, Map<String, Long>> incrTimestampMap;
@ -198,8 +206,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
return tableSetTimestampMap;
}
public void setTableSetTimestampMap(Map<TableName,
Map<String, Long>> tableSetTimestampMap) {
public void setTableSetTimestampMap(Map<TableName, Map<String, Long>> tableSetTimestampMap) {
this.tableSetTimestampMap = tableSetTimestampMap;
}
@ -357,8 +364,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
* Set the new region server log timestamps after distributed log roll
* @param prevTableSetTimestampMap table timestamp map
*/
public void setIncrTimestampMap(Map<TableName,
Map<String, Long>> prevTableSetTimestampMap) {
public void setIncrTimestampMap(Map<TableName, Map<String, Long>> prevTableSetTimestampMap) {
this.incrTimestampMap = prevTableSetTimestampMap;
}
@ -482,8 +488,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name()));
}
context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(),
proto.getBackupId()));
context
.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), proto.getBackupId()));
if (proto.hasBackupPhase()) {
context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name()));
@ -507,12 +513,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
return map;
}
private static Map<TableName, Map<String, Long>> getTableSetTimestampMap(
Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
private static Map<TableName, Map<String, Long>>
getTableSetTimestampMap(Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
Map<TableName, Map<String, Long>> tableSetTimestampMap = new HashMap<>();
for (Entry<String, BackupProtos.BackupInfo.RSTimestampMap> entry : map.entrySet()) {
tableSetTimestampMap
.put(TableName.valueOf(entry.getKey()), entry.getValue().getRsTimestampMap());
tableSetTimestampMap.put(TableName.valueOf(entry.getKey()),
entry.getValue().getRsTimestampMap());
}
return tableSetTimestampMap;
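The timestamp maps handled above are nested: table name to a map of RegionServer hostname to the last rolled log timestamp. A tiny sketch that builds one such map; the hostnames, ports and timestamps are made up.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;

public final class TimestampMapSketch {
  public static void main(String[] args) {
    // key: table name, value: RegionServer hostname -> last log rolled timestamp
    Map<TableName, Map<String, Long>> tableSetTimestampMap = new HashMap<>();
    Map<String, Long> rsTimestamps = new HashMap<>();
    rsTimestamps.put("rs1.example.com,16020", 1_651_400_000_000L);
    rsTimestamps.put("rs2.example.com,16020", 1_651_400_123_456L);
    tableSetTimestampMap.put(TableName.valueOf("ns", "table1"), rsTimestamps);
    System.out.println(tableSetTimestampMap);
  }
}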

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.yetus.audience.InterfaceAudience;
@ -32,7 +30,6 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface BackupMergeJob extends Configurable {
/**
* Run backup merge operation.
*
* @param backupIds backup image ids
* @throws IOException if the backup merge operation fails
*/

View File

@ -9,12 +9,11 @@
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
@ -22,7 +21,6 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@ -82,6 +80,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
LOG.error("Failed to get tables which have been fully backed up", ioe);
}
}
@Override
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.HConstants;
@ -62,8 +61,8 @@ public interface BackupRestoreConstants {
String OPTION_DEBUG_DESC = "Enable debug loggings";
String OPTION_TABLE = "t";
String OPTION_TABLE_DESC = "Table name. If specified, only backup images,"
+ " which contain this table will be listed.";
String OPTION_TABLE_DESC =
"Table name. If specified, only backup images," + " which contain this table will be listed.";
String OPTION_LIST = "l";
String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
@ -84,10 +83,8 @@ public interface BackupRestoreConstants {
String OPTION_KEEP = "k";
String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete";
String OPTION_TABLE_MAPPING = "m";
String OPTION_TABLE_MAPPING_DESC =
"A comma separated list of target tables. "
String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. "
+ "If specified, each table in <tables> must have a mapping";
String OPTION_YARN_QUEUE_NAME = "q";
String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
@ -95,20 +92,17 @@ public interface BackupRestoreConstants {
String JOB_NAME_CONF_KEY = "mapreduce.job.name";
String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY
+ "=true\n"
+ "hbase.master.logcleaner.plugins="
+"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
String BACKUP_CONFIG_STRING =
BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins="
+ "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+ "hbase.procedure.master.classes=YOUR_CLASSES,"
+"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+ "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+ "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+ "hbase.coprocessor.region.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.BackupObserver\n"
+ "and restart the cluster\n"
+ "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n"
+ "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "+
"in hbase-site.xml, set:\n "
String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n "
+ BACKUP_CONFIG_STRING;
String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING;
@ -123,7 +117,24 @@ public interface BackupRestoreConstants {
String BACKUPID_PREFIX = "backup_";
enum BackupCommand {
CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS,
SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR
CREATE,
CANCEL,
DELETE,
DESCRIBE,
HISTORY,
STATUS,
CONVERT,
MERGE,
STOP,
SHOW,
HELP,
PROGRESS,
SET,
SET_ADD,
SET_REMOVE,
SET_DELETE,
SET_DESCRIBE,
SET_LIST,
REPAIR
}
}
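
The BACKUP_CONFIG_STRING above lists the hbase-site.xml properties that must be in place before any of these commands can run. Below is a minimal programmatic sketch of the same settings, not a drop-in configuration: the property values are copied from the string above, and in a real deployment the plugin/class lists are appended to whatever the "YOUR_PLUGINS"/"YOUR_CLASSES" placeholders already hold rather than overwritten.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupRestoreConstants;

    public class EnableBackupSketch {
      public static Configuration enableBackup() {
        Configuration conf = HBaseConfiguration.create();
        // Turn the backup subsystem on (BACKUP_ENABLE_KEY "=true" per BACKUP_CONFIG_STRING).
        conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
        // Class names below are taken verbatim from BACKUP_CONFIG_STRING; append to any
        // existing values instead of replacing them.
        conf.set("hbase.master.logcleaner.plugins",
          "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
        conf.set("hbase.procedure.master.classes",
          "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
        conf.set("hbase.procedure.regionserver.classes",
          "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
        conf.set("hbase.coprocessor.region.classes",
          "org.apache.hadoop.hbase.backup.BackupObserver");
        return conf;
      }
    }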

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -26,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience;
/**
* Factory implementation for backup/restore related jobs
*
*/
@InterfaceAudience.Private
public final class BackupRestoreFactory {
@ -57,9 +56,8 @@ public final class BackupRestoreFactory {
* @return backup copy job instance
*/
public static BackupCopyJob getBackupCopyJob(Configuration conf) {
Class<? extends BackupCopyJob> cls =
conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class,
BackupCopyJob.class);
Class<? extends BackupCopyJob> cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS,
MapReduceBackupCopyJob.class, BackupCopyJob.class);
BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
@ -71,9 +69,8 @@ public final class BackupRestoreFactory {
* @return backup merge job instance
*/
public static BackupMergeJob getBackupMergeJob(Configuration conf) {
Class<? extends BackupMergeJob> cls =
conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class,
BackupMergeJob.class);
Class<? extends BackupMergeJob> cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS,
MapReduceBackupMergeJob.class, BackupMergeJob.class);
BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,11 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -16,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -56,8 +53,8 @@ public final class HBackupFileSystem {
* @param tableName table name
* @return backupPath String for the particular table
*/
public static String
getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
public static String getTableBackupDir(String backupRootDir, String backupId,
TableName tableName) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
+ Path.SEPARATOR;
@ -126,21 +123,19 @@ public final class HBackupFileSystem {
private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
throws IOException {
FileSystem fs = backupRootPath.getFileSystem(conf);
Path manifestPath =
new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
+ BackupManifest.MANIFEST_FILE_NAME);
if (!fs.exists(manifestPath)) {
String errorMsg =
"Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for "
+ backupId + ". File " + manifestPath + " does not exists. Did " + backupId
String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME
+ " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId
+ " correspond to previously taken backup ?";
throw new IOException(errorMsg);
}
return manifestPath;
}
public static BackupManifest
getManifest(Configuration conf, Path backupRootPath, String backupId) throws IOException {
public static BackupManifest getManifest(Configuration conf, Path backupRootPath, String backupId)
throws IOException {
BackupManifest manifest =
new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId));
return manifest;
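
getTableBackupDir above is plain string concatenation (root + separator + backup id + namespace + qualifier), so the resulting layout can be read off directly. A small sketch with made-up values; the root directory, backup id and table name are hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.HBackupFileSystem;

    public class BackupPathSketch {
      public static void main(String[] args) {
        // Prints "hdfs://ns/backup/backup_1600000000000/ns1/t1/"
        // i.e. root + "/" + backupId + "/" + namespace + "/" + qualifier + "/".
        String dir = HBackupFileSystem.getTableBackupDir("hdfs://ns/backup",
          "backup_1600000000000", TableName.valueOf("ns1:t1"));
        System.out.println(dir);
      }
    }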

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -59,9 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
/**
*
* Command-line entry point for restore operation
*
*/
@InterfaceAudience.Private
public class RestoreDriver extends AbstractHBaseTool {
@ -107,13 +105,13 @@ public class RestoreDriver extends AbstractHBaseTool {
// whether to only check the dependencies, false by default
boolean check = cmd.hasOption(OPTION_CHECK);
if (check) {
LOG.debug("Found -check option in restore command, "
+ "will check and verify the dependencies");
LOG.debug(
"Found -check option in restore command, " + "will check and verify the dependencies");
}
if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) {
System.err.println("Options -s and -t are mutaully exclusive,"+
" you can not specify both of them.");
System.err.println(
"Options -s and -t are mutaully exclusive," + " you can not specify both of them.");
printToolUsage();
return -1;
}
@ -155,8 +153,8 @@ public class RestoreDriver extends AbstractHBaseTool {
return -2;
}
if (tables == null) {
System.out.println("ERROR: Backup set '" + setName
+ "' is either empty or does not exist");
System.out
.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
printToolUsage();
return -3;
}
@ -167,15 +165,16 @@ public class RestoreDriver extends AbstractHBaseTool {
TableName[] sTableArray = BackupUtils.parseTableNames(tables);
TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping);
if (sTableArray != null && tTableArray != null &&
(sTableArray.length != tTableArray.length)) {
if (
sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)
) {
System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
printToolUsage();
return -4;
}
client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check,
sTableArray, tTableArray, overwrite));
client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray,
tTableArray, overwrite));
} catch (Exception e) {
LOG.error("Error while running restore backup", e);
return -5;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
@ -40,6 +38,6 @@ public interface RestoreJob extends Configurable {
* @param fullBackupRestore full backup restore
* @throws IOException if running the job fails
*/
void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
boolean fullBackupRestore) throws IOException;
void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore)
throws IOException;
}
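
RestoreJob is a small Configurable contract: an implementation receives the directories to restore plus parallel arrays of source and target tables. A toy implementation follows, only to make the shape of the interface concrete; the class name and its logging body are invented, and it assumes run(...) is the only method beyond the Configurable pair shown here.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.RestoreJob;

    /** Toy RestoreJob that only reports what it was asked to restore. */
    public class LoggingRestoreJob implements RestoreJob {
      private Configuration conf;

      @Override
      public void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
        boolean fullBackupRestore) throws IOException {
        for (int i = 0; i < fromTables.length; i++) {
          System.out.println("Would restore " + fromTables[i] + " -> " + toTables[i]
            + (fullBackupRestore ? " (full)" : " (incremental)"));
        }
      }

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }
    }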

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -25,7 +25,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@ -285,8 +284,8 @@ public class BackupAdminImpl implements BackupAdmin {
private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
throws IOException {
List<TableName> tables = info.getTableNames();
LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables="
+ info.getTableListAsString());
LOG.debug(
"Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString());
if (tables.contains(tn)) {
tables.remove(tn);
@ -349,8 +348,7 @@ public class BackupAdminImpl implements BackupAdmin {
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
Path targetDirPath =
new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) {
LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
@ -474,8 +472,8 @@ public class BackupAdminImpl implements BackupAdmin {
}
}
table.addToBackupSet(name, tableNames);
LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
+ "' backup set");
LOG.info(
"Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set");
}
}
@ -484,8 +482,8 @@ public class BackupAdminImpl implements BackupAdmin {
LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
try (final BackupSystemTable table = new BackupSystemTable(conn)) {
table.removeFromBackupSet(name, toStringArray(tables));
LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name
+ "' completed.");
LOG.info(
"Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed.");
}
}
@ -534,8 +532,8 @@ public class BackupAdminImpl implements BackupAdmin {
}
if (incrTableSet.isEmpty()) {
String msg = "Incremental backup table set contains no tables. "
+ "You need to run full backup first "
String msg =
"Incremental backup table set contains no tables. " + "You need to run full backup first "
+ (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
throw new IOException(msg);
@ -559,8 +557,8 @@ public class BackupAdminImpl implements BackupAdmin {
FileSystem outputFs =
FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
if (outputFs.exists(targetTableBackupDirPath)) {
throw new IOException("Target backup directory " + targetTableBackupDir
+ " exists already.");
throw new IOException(
"Target backup directory " + targetTableBackupDir + " exists already.");
}
outputFs.mkdirs(targetTableBackupDirPath);
}
@ -581,8 +579,8 @@ public class BackupAdminImpl implements BackupAdmin {
tableList = excludeNonExistingTables(tableList, nonExistingTableList);
} else {
// Throw exception only in full mode - we try to backup non-existing table
throw new IOException("Non-existing tables found in the table list: "
+ nonExistingTableList);
throw new IOException(
"Non-existing tables found in the table list: " + nonExistingTableList);
}
}
}
@ -590,9 +588,9 @@ public class BackupAdminImpl implements BackupAdmin {
// update table list
BackupRequest.Builder builder = new BackupRequest.Builder();
request = builder.withBackupType(request.getBackupType()).withTableList(tableList)
.withTargetRootDir(request.getTargetRootDir())
.withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks())
.withBandwidthPerTasks((int) request.getBandwidth()).build();
.withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName())
.withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth())
.build();
TableBackupClient client;
try {
@ -619,7 +617,7 @@ public class BackupAdminImpl implements BackupAdmin {
public void mergeBackups(String[] backupIds) throws IOException {
try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
checkIfValidForMerge(backupIds, sysTable);
//TODO run job on remote cluster
// TODO run job on remote cluster
BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
job.run(backupIds);
}
@ -627,7 +625,6 @@ public class BackupAdminImpl implements BackupAdmin {
/**
* Verifies that backup images are valid for merge.
*
* <ul>
* <li>All backups MUST be in the same destination
* <li>No FULL backups are allowed - only INCREMENTAL
@ -688,7 +685,7 @@ public class BackupAdminImpl implements BackupAdmin {
BackupInfo.Filter timeRangeFilter = info -> {
long time = info.getStartTs();
return time >= startRangeTime && time <= endRangeTime ;
return time >= startRangeTime && time <= endRangeTime;
};
BackupInfo.Filter tableFilter = info -> {
@ -699,20 +696,20 @@ public class BackupAdminImpl implements BackupAdmin {
BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter,
timeRangeFilter, tableFilter, typeFilter, stateFilter);
List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter,
tableFilter, typeFilter, stateFilter);
if (allInfos.size() != allBackups.size()) {
// Yes we have at least one hole in backup image sequence
List<String> missingIds = new ArrayList<>();
for(BackupInfo info: allInfos) {
if(allBackups.contains(info.getBackupId())) {
for (BackupInfo info : allInfos) {
if (allBackups.contains(info.getBackupId())) {
continue;
}
missingIds.add(info.getBackupId());
}
String errMsg =
"Sequence of backup ids has 'holes'. The following backup images must be added:" +
org.apache.hadoop.util.StringUtils.join(",", missingIds);
"Sequence of backup ids has 'holes'. The following backup images must be added:"
+ org.apache.hadoop.util.StringUtils.join(",", missingIds);
throw new IOException(errMsg);
}
}
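
The BackupRequest.Builder chain above is how a backup request is assembled before being handed to the table backup client. A short sketch with placeholder values (the table names, destination and task counts are made up for illustration):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupRequest;
    import org.apache.hadoop.hbase.backup.BackupType;

    public class BackupRequestSketch {
      public static BackupRequest fullBackupRequest() {
        List<TableName> tables =
          Arrays.asList(TableName.valueOf("ns1:t1"), TableName.valueOf("ns1:t2"));
        return new BackupRequest.Builder()
          .withBackupType(BackupType.FULL)
          .withTableList(tables)
          .withTargetRootDir("hdfs://ns/backup") // placeholder destination
          .withTotalTasks(4)
          .withBandwidthPerTasks(100)
          .build();
      }
    }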

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
@ -44,7 +43,6 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@ -88,8 +86,7 @@ public final class BackupCommands {
+ " describe show the detailed information of a backup image\n"
+ " history show history of all successful backups\n"
+ " progress show the progress of the latest backup request\n"
+ " set backup set management\n"
+ " repair repair backup system table\n"
+ " set backup set management\n" + " repair repair backup system table\n"
+ " merge merge backup images\n"
+ "Run \'hbase backup COMMAND -h\' to see help message for each command\n";
@ -105,8 +102,8 @@ public final class BackupCommands {
public static final String NO_INFO_FOUND = "No info was found for backup id: ";
public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found.";
public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe <backup_id>\n"
+ " backup_id Backup image id\n";
public static final String DESCRIBE_CMD_USAGE =
"Usage: hbase backup describe <backup_id>\n" + " backup_id Backup image id\n";
public static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [options]";
@ -115,12 +112,11 @@ public final class BackupCommands {
public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n";
public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
+ " name Backup set name\n"
+ " tables Comma separated list of tables.\n" + "COMMAND is one of:\n"
+ " add add tables to a set, create a set if needed\n"
+ " name Backup set name\n" + " tables Comma separated list of tables.\n"
+ "COMMAND is one of:\n" + " add add tables to a set, create a set if needed\n"
+ " remove remove tables from a set\n"
+ " list list all backup sets in the system\n"
+ " describe describe set\n" + " delete delete backup set\n";
+ " list list all backup sets in the system\n" + " describe describe set\n"
+ " delete delete backup set\n";
public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n"
+ " backup_ids Comma separated list of backup image ids.\n";
@ -281,8 +277,10 @@ public final class BackupCommands {
throw new IOException(INCORRECT_USAGE);
}
if (!BackupType.FULL.toString().equalsIgnoreCase(args[1])
&& !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) {
if (
!BackupType.FULL.toString().equalsIgnoreCase(args[1])
&& !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])
) {
System.out.println("ERROR: invalid backup type: " + args[1]);
printUsage();
throw new IOException(INCORRECT_USAGE);
@ -301,8 +299,8 @@ public final class BackupCommands {
// Check if we have both: backup set and list of tables
if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) {
System.out.println("ERROR: You can specify either backup set or list"
+ " of tables, but not both");
System.out
.println("ERROR: You can specify either backup set or list" + " of tables, but not both");
printUsage();
throw new IOException(INCORRECT_USAGE);
}
@ -315,20 +313,20 @@ public final class BackupCommands {
tables = getTablesForSet(setName, getConf());
if (tables == null) {
System.out.println("ERROR: Backup set '" + setName
+ "' is either empty or does not exist");
System.out
.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
printUsage();
throw new IOException(INCORRECT_USAGE);
}
} else {
tables = cmdline.getOptionValue(OPTION_TABLE);
}
int bandwidth =
cmdline.hasOption(OPTION_BANDWIDTH) ? Integer.parseInt(cmdline
.getOptionValue(OPTION_BANDWIDTH)) : -1;
int workers =
cmdline.hasOption(OPTION_WORKERS) ? Integer.parseInt(cmdline
.getOptionValue(OPTION_WORKERS)) : -1;
int bandwidth = cmdline.hasOption(OPTION_BANDWIDTH)
? Integer.parseInt(cmdline.getOptionValue(OPTION_BANDWIDTH))
: -1;
int workers = cmdline.hasOption(OPTION_WORKERS)
? Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS))
: -1;
if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) {
String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME);
@ -338,9 +336,7 @@ public final class BackupCommands {
try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
BackupRequest.Builder builder = new BackupRequest.Builder();
BackupRequest request =
builder
.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
.withTableList(
tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
@ -506,8 +502,8 @@ public final class BackupCommands {
public void execute() throws IOException {
if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) {
System.out.println("No backup id was specified, "
+ "will retrieve the most recent (ongoing) session");
System.out.println(
"No backup id was specified, " + "will retrieve the most recent (ongoing) session");
}
String[] args = cmdline == null ? null : cmdline.getArgs();
if (args != null && args.length > 2) {
@ -694,9 +690,8 @@ public final class BackupCommands {
// set overall backup status: failed
backupInfo.setState(BackupState.FAILED);
// compose the backup failed data
String backupFailedData =
"BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs()
+ ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
+ backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
System.out.println(backupFailedData);
TableBackupClient.cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
@ -754,9 +749,11 @@ public final class BackupCommands {
}
boolean res = fs.rename(tmpPath, destPath);
if (!res) {
throw new IOException("MERGE repair: failed to rename from "+ tmpPath+" to "+ destPath);
throw new IOException(
"MERGE repair: failed to rename from " + tmpPath + " to " + destPath);
}
System.out.println("MERGE repair: renamed from "+ tmpPath+" to "+ destPath+" res="+ res);
System.out
.println("MERGE repair: renamed from " + tmpPath + " to " + destPath + " res=" + res);
} else {
checkRemoveBackupImages(fs, backupRoot, backupIds);
}
@ -773,16 +770,16 @@ public final class BackupCommands {
private static void checkRemoveBackupImages(FileSystem fs, String backupRoot,
String[] backupIds) throws IOException {
String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);
for (String backupId: backupIds) {
for (String backupId : backupIds) {
if (backupId.equals(mergedBackupId)) {
continue;
}
Path path = HBackupFileSystem.getBackupPath(backupRoot, backupId);
if (fs.exists(path)) {
if (!fs.delete(path, true)) {
System.out.println("MERGE repair removing: "+ path +" - FAILED");
System.out.println("MERGE repair removing: " + path + " - FAILED");
} else {
System.out.println("MERGE repair removing: "+ path +" - OK");
System.out.println("MERGE repair removing: " + path + " - OK");
}
}
}
@ -816,16 +813,16 @@ public final class BackupCommands {
String[] args = cmdline == null ? null : cmdline.getArgs();
if (args == null || (args.length != 2)) {
System.err.println("ERROR: wrong number of arguments: "
+ (args == null ? null : args.length));
System.err
.println("ERROR: wrong number of arguments: " + (args == null ? null : args.length));
printUsage();
throw new IOException(INCORRECT_USAGE);
}
String[] backupIds = args[1].split(",");
if (backupIds.length < 2) {
String msg = "ERROR: can not merge a single backup image. "+
"Number of images must be greater than 1.";
String msg = "ERROR: can not merge a single backup image. "
+ "Number of images must be greater than 1.";
System.err.println(msg);
throw new IOException(msg);

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import org.apache.hadoop.hbase.HBaseIOException;
@ -68,8 +67,7 @@ public class BackupException extends HBaseIOException {
}
/**
* Exception when the description of the backup cannot be determined, due to some other root
* cause
* Exception when the description of the backup cannot be determined, due to some other root cause
* @param message description of what caused the failure
* @param e root cause
*/

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -77,8 +76,10 @@ public class BackupManager implements Closeable {
* @throws IOException exception
*/
public BackupManager(Connection conn, Configuration conf) throws IOException {
if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) {
if (
!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
) {
throw new BackupException("HBase backup is not enabled. Check your "
+ BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting.");
}
@ -120,12 +121,13 @@ public class BackupManager implements Closeable {
}
plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") +
BackupHFileCleaner.class.getName());
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
(plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
if (LOG.isDebugEnabled()) {
LOG.debug("Added log cleaner: {}. Added master procedure manager: {}."
+"Added master procedure manager: {}", cleanerClass, masterProcedureClass,
BackupHFileCleaner.class.getName());
LOG.debug(
"Added log cleaner: {}. Added master procedure manager: {}."
+ "Added master procedure manager: {}",
cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
}
}
@ -163,8 +165,7 @@ public class BackupManager implements Closeable {
}
/**
* Get configuration
* @return configuration
* Get configuration n
*/
Configuration getConf() {
return conf;
@ -191,9 +192,7 @@ public class BackupManager implements Closeable {
* @param tableList table list
* @param targetRootDir root dir
* @param workers number of parallel workers
* @param bandwidth bandwidth per worker in MB per sec
* @return BackupInfo
* @throws BackupException exception
* @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception
*/
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
String targetRootDir, int workers, long bandwidth) throws BackupException {
@ -325,16 +324,16 @@ public class BackupManager implements Closeable {
} else {
Path logBackupPath =
HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId());
LOG.debug("Current backup has an incremental backup ancestor, "
+ "touching its image manifest in {}"
+ " to construct the dependency.", logBackupPath.toString());
LOG.debug(
"Current backup has an incremental backup ancestor, "
+ "touching its image manifest in {}" + " to construct the dependency.",
logBackupPath.toString());
BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
ancestors.add(lastIncrImage);
LOG.debug(
"Last dependent incremental backup image: {BackupID={}" +
"BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}",
lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
}
}
}
@ -399,8 +398,10 @@ public class BackupManager implements Closeable {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
if (lastWarningOutputTime == 0
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) {
if (
lastWarningOutputTime == 0
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000
) {
lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
LOG.warn("Waiting to acquire backup exclusive lock for {}s",
+(lastWarningOutputTime - startTime) / 1000);
@ -480,8 +481,8 @@ public class BackupManager implements Closeable {
* @param tables tables
* @throws IOException exception
*/
public void writeRegionServerLogTimestamp(Set<TableName> tables,
Map<String, Long> newTimestamps) throws IOException {
public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps)
throws IOException {
systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir());
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException;
@ -26,7 +25,6 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -50,9 +48,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
/**
* Backup manifest contains all the meta data of a backup image. The manifest info will be bundled
* as manifest file together with data. So that each backup image will contain all the info needed
* for restore. BackupManifest is a storage container for BackupImage.
* It is responsible for storing/reading backup image data and has some additional utility methods.
*
* for restore. BackupManifest is a storage container for BackupImage. It is responsible for
* storing/reading backup image data and has some additional utility methods.
*/
@InterfaceAudience.Private
public class BackupManifest {
@ -126,8 +123,8 @@ public class BackupManifest {
super();
}
private BackupImage(String backupId, BackupType type, String rootDir,
List<TableName> tableList, long startTs, long completeTs) {
private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList,
long startTs, long completeTs) {
this.backupId = backupId;
this.type = type;
this.rootDir = rootDir;
@ -149,8 +146,8 @@ public class BackupManifest {
List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();
BackupType type =
im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL
BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL
? BackupType.FULL
: BackupType.INCREMENTAL;
BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
@ -187,8 +184,8 @@ public class BackupManifest {
return builder.build();
}
private static Map<TableName, Map<String, Long>> loadIncrementalTimestampMap(
BackupProtos.BackupImage proto) {
private static Map<TableName, Map<String, Long>>
loadIncrementalTimestampMap(BackupProtos.BackupImage proto) {
List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList();
Map<TableName, Map<String, Long>> incrTimeRanges = new HashMap<>();
@ -378,8 +375,7 @@ public class BackupManifest {
*/
public BackupManifest(BackupInfo backup) {
BackupImage.Builder builder = BackupImage.newBuilder();
this.backupImage =
builder.withBackupId(backup.getBackupId()).withType(backup.getType())
this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
}
@ -393,15 +389,13 @@ public class BackupManifest {
List<TableName> tables = new ArrayList<TableName>();
tables.add(table);
BackupImage.Builder builder = BackupImage.newBuilder();
this.backupImage =
builder.withBackupId(backup.getBackupId()).withType(backup.getType())
this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
.withRootDir(backup.getBackupRootDir()).withTableList(tables)
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
}
/**
* Construct manifest from a backup directory.
*
* @param conf configuration
* @param backupPath backup path
* @throws IOException if constructing the manifest from the backup directory fails
@ -480,8 +474,8 @@ public class BackupManifest {
byte[] data = backupImage.toProto().toByteArray();
// write the file, overwrite if already exist
Path manifestFilePath =
new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(),
backupImage.getBackupId()), MANIFEST_FILE_NAME);
new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()),
MANIFEST_FILE_NAME);
try (FSDataOutputStream out =
manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
out.write(data);
@ -531,8 +525,8 @@ public class BackupManifest {
for (BackupImage image : backupImage.getAncestors()) {
restoreImages.put(Long.valueOf(image.startTs), image);
}
return new ArrayList<>(reverse ? (restoreImages.descendingMap().values())
: (restoreImages.values()));
return new ArrayList<>(
reverse ? (restoreImages.descendingMap().values()) : (restoreImages.values()));
}
/**
@ -664,8 +658,8 @@ public class BackupManifest {
info.setStartTs(backupImage.getStartTs());
info.setBackupRootDir(backupImage.getRootDir());
if (backupImage.getType() == BackupType.INCREMENTAL) {
info.setHLogTargetDir(BackupUtils.getLogBackupDir(backupImage.getRootDir(),
backupImage.getBackupId()));
info.setHLogTargetDir(
BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId()));
}
return info;
}
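
HBackupFileSystem.getManifest (shown earlier) is the usual way to load one of these manifests back from a backup destination; the BackupImage it wraps carries the backup id, root directory and ancestor images used when resolving restore dependencies. A sketch with placeholder root path and backup id:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.HBackupFileSystem;
    import org.apache.hadoop.hbase.backup.impl.BackupManifest;
    import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;

    public class ManifestPeek {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Root directory and backup id are placeholders.
        BackupManifest manifest = HBackupFileSystem.getManifest(conf,
          new Path("hdfs://ns/backup"), "backup_1600000000000");
        BackupImage image = manifest.getBackupImage();
        System.out.println("id=" + image.getBackupId() + " root=" + image.getRootDir()
          + " ancestors=" + image.getAncestors().size());
      }
    }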

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
@ -281,8 +281,10 @@ public final class BackupSystemTable implements Closeable {
res.advance();
byte[] row = CellUtil.cloneRow(res.listCells().get(0));
for (Cell cell : res.listCells()) {
if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0) {
if (
CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0
) {
map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
}
}
@ -310,14 +312,20 @@ public final class BackupSystemTable implements Closeable {
byte[] fam = null;
String path = null;
for (Cell cell : res.listCells()) {
if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
BackupSystemTable.TBL_COL.length) == 0) {
if (
CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
BackupSystemTable.TBL_COL.length) == 0
) {
tbl = TableName.valueOf(CellUtil.cloneValue(cell));
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0) {
} else if (
CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0
) {
fam = CellUtil.cloneValue(cell);
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0) {
} else if (
CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0
) {
path = Bytes.toString(CellUtil.cloneValue(cell));
}
}
@ -448,14 +456,20 @@ public final class BackupSystemTable implements Closeable {
rows.add(row);
String rowStr = Bytes.toString(row);
region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0) {
if (
CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0
) {
fam = Bytes.toString(CellUtil.cloneValue(cell));
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0) {
} else if (
CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0
) {
path = Bytes.toString(CellUtil.cloneValue(cell));
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
BackupSystemTable.STATE_COL.length) == 0) {
} else if (
CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
BackupSystemTable.STATE_COL.length) == 0
) {
byte[] state = CellUtil.cloneValue(cell);
if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
raw = true;
@ -591,11 +605,15 @@ public final class BackupSystemTable implements Closeable {
try (Table table = connection.getTable(tableName)) {
Put put = createPutForStartBackupSession();
// First try to put if row does not exist
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifNotExists().thenPut(put)) {
if (
!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifNotExists().thenPut(put)
) {
// Row exists, try to put if value == ACTIVE_SESSION_NO
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
if (
!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_NO).thenPut(put)
) {
throw new ExclusiveOperationException();
}
}
@ -613,8 +631,10 @@ public final class BackupSystemTable implements Closeable {
try (Table table = connection.getTable(tableName)) {
Put put = createPutForStopBackupSession();
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_YES).thenPut(put)) {
if (
!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_YES).thenPut(put)
) {
throw new IOException("There is no active backup exclusive operation");
}
}
@ -852,8 +872,8 @@ public final class BackupSystemTable implements Closeable {
* @param backupRoot root directory path to backup
* @throws IOException exception
*/
public void writeRegionServerLogTimestamp(Set<TableName> tables,
Map<String, Long> newTimestamps, String backupRoot) throws IOException {
public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps,
String backupRoot) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("write RS log time stamps to backup system table for tables ["
+ StringUtils.join(tables, ",") + "]");
@ -1472,8 +1492,8 @@ public final class BackupSystemTable implements Closeable {
put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
puts.add(put);
LOG.debug(
"writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
LOG
.debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
}
}
return puts;
@ -1740,8 +1760,8 @@ public final class BackupSystemTable implements Closeable {
*/
static Scan createScanForBulkLoadedFiles(String backupId) {
Scan scan = new Scan();
byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES
: rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
byte[] startRow =
backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow);
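
The checkAndMutate calls above implement the exclusive backup session: the first put succeeds only if the session cell does not exist yet, the second only if a previous holder wrote the "released" value. The same pattern reduced to a generic sketch; the row, family, qualifier and marker values here are made up.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Sketch of the checkAndMutate "exclusive session" pattern with hypothetical names. */
    public final class ExclusiveLockSketch {
      private static final byte[] ROW = Bytes.toBytes("lock");
      private static final byte[] FAM = Bytes.toBytes("f");
      private static final byte[] QUAL = Bytes.toBytes("q");
      private static final byte[] HELD = Bytes.toBytes("yes");
      private static final byte[] RELEASED = Bytes.toBytes("no");

      /** Returns true if this caller atomically acquired the lock row. */
      public static boolean tryAcquire(Connection conn, TableName tableName) throws IOException {
        try (Table table = conn.getTable(tableName)) {
          Put put = new Put(ROW).addColumn(FAM, QUAL, HELD);
          // First writer wins if the cell does not exist yet...
          if (table.checkAndMutate(ROW, FAM).qualifier(QUAL).ifNotExists().thenPut(put)) {
            return true;
          }
          // ...otherwise succeed only if a previous holder released it.
          return table.checkAndMutate(ROW, FAM).qualifier(QUAL).ifEquals(RELEASED).thenPut(put);
        }
      }
    }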

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -19,7 +18,6 @@
package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY;
@ -28,7 +27,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo;
@ -48,7 +46,6 @@ import org.slf4j.LoggerFactory;
/**
* Full table backup implementation
*
*/
@InterfaceAudience.Private
public class FullTableBackupClient extends TableBackupClient {
@ -127,7 +124,6 @@ public class FullTableBackupClient extends TableBackupClient {
/**
* Backup request execution.
*
* @throws IOException if the execution of the backup fails
*/
@Override
@ -163,9 +159,8 @@ public class FullTableBackupClient extends TableBackupClient {
// SNAPSHOT_TABLES:
backupInfo.setPhase(BackupPhase.SNAPSHOT);
for (TableName tableName : tableList) {
String snapshotName =
"snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
+ tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
+ "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
snapshotTable(admin, tableName, snapshotName);
backupInfo.setSnapshotName(tableName, snapshotName);
@ -191,8 +186,7 @@ public class FullTableBackupClient extends TableBackupClient {
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
Long newStartCode =
BackupUtils.getMinValue(BackupUtils
.getRSLogTimestampMins(newTableSetTimestampMap));
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);
// backup complete
@ -206,10 +200,8 @@ public class FullTableBackupClient extends TableBackupClient {
protected void snapshotTable(Admin admin, TableName tableName, String snapshotName)
throws IOException {
int maxAttempts =
conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
int pause =
conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
int maxAttempts = conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
int pause = conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
int attempts = 0;
while (attempts++ < maxAttempts) {
@ -229,6 +221,6 @@ public class FullTableBackupClient extends TableBackupClient {
}
}
}
throw new IOException("Failed to snapshot table "+ tableName);
throw new IOException("Failed to snapshot table " + tableName);
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException;
@ -77,10 +76,10 @@ public class IncrementalBackupManager extends BackupManager {
LOG.debug("StartCode " + savedStartCode + "for backupID " + backupInfo.getBackupId());
}
// get all new log files from .logs and .oldlogs after last TS and before new timestamp
if (savedStartCode == null || previousTimestampMins == null
|| previousTimestampMins.isEmpty()) {
throw new IOException(
"Cannot read any previous back up timestamps from backup system table. "
if (
savedStartCode == null || previousTimestampMins == null || previousTimestampMins.isEmpty()
) {
throw new IOException("Cannot read any previous back up timestamps from backup system table. "
+ "In order to create an incremental backup, at least one full backup is needed.");
}
@ -103,7 +102,7 @@ public class IncrementalBackupManager extends BackupManager {
private List<String> excludeProcV2WALs(List<String> logList) {
List<String> list = new ArrayList<>();
for (int i=0; i < logList.size(); i++) {
for (int i = 0; i < logList.size(); i++) {
Path p = new Path(logList.get(i));
String name = p.getName();
@ -194,7 +193,7 @@ public class IncrementalBackupManager extends BackupManager {
if (ts == null) {
LOG.warn("ORPHAN log found: " + log + " host=" + host);
LOG.debug("Known hosts (from newestTimestamps):");
for (String s: newestTimestamps.keySet()) {
for (String s : newestTimestamps.keySet()) {
LOG.debug(s);
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
@ -53,9 +52,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Incremental backup implementation.
* See the {@link #execute() execute} method.
*
* Incremental backup implementation. See the {@link #execute() execute} method.
*/
@InterfaceAudience.Private
public class IncrementalTableBackupClient extends TableBackupClient {
@ -105,8 +102,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
}
/*
* Reads bulk load records from backup table, iterates through the records and forms the paths
* for bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
* Reads bulk load records from backup table, iterates through the records and forms the paths for
* bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
* @param sTableList list of tables to be backed up
* @return map of table to List of files
*/
@ -128,8 +125,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
Path rootdir = CommonFSUtils.getRootDir(conf);
Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);
for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry :
map.entrySet()) {
for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry : map
.entrySet()) {
TableName srcTable = tblEntry.getKey();
int srcIdx = getIndex(srcTable, sTableList);
@ -143,13 +140,13 @@ public class IncrementalTableBackupClient extends TableBackupClient {
Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
srcTable.getQualifierAsString());
for (Map.Entry<String,Map<String,List<Pair<String, Boolean>>>> regionEntry :
tblEntry.getValue().entrySet()){
for (Map.Entry<String, Map<String, List<Pair<String, Boolean>>>> regionEntry : tblEntry
.getValue().entrySet()) {
String regionName = regionEntry.getKey();
Path regionDir = new Path(tblDir, regionName);
// map from family to List of hfiles
for (Map.Entry<String,List<Pair<String, Boolean>>> famEntry :
regionEntry.getValue().entrySet()) {
for (Map.Entry<String, List<Pair<String, Boolean>>> famEntry : regionEntry.getValue()
.entrySet()) {
String fam = famEntry.getKey();
Path famDir = new Path(regionDir, fam);
List<Path> files;
@ -170,7 +167,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
int idx = file.lastIndexOf("/");
String filename = file;
if (idx > 0) {
filename = file.substring(idx+1);
filename = file.substring(idx + 1);
}
Path p = new Path(famDir, filename);
Path tgt = new Path(tgtFam, filename);
@ -183,7 +180,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
LOG.trace("copying " + p + " to " + tgt);
}
activeFiles.add(p.toString());
} else if (fs.exists(archive)){
} else if (fs.exists(archive)) {
LOG.debug("copying archive " + archive + " to " + tgt);
archiveFiles.add(archive.toString());
}
@ -207,8 +204,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId();
int attempt = 1;
while (activeFiles.size() > 0) {
LOG.info("Copy "+ activeFiles.size() +
" active bulk loaded files. Attempt ="+ (attempt++));
LOG.info(
"Copy " + activeFiles.size() + " active bulk loaded files. Attempt =" + (attempt++));
String[] toCopy = new String[activeFiles.size()];
activeFiles.toArray(toCopy);
// Active file can be archived during copy operation,
@ -270,8 +267,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
LOG.debug("For incremental backup, current table set is "
+ backupManager.getIncrementalBackupTableSet());
newTimestamps =
((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
} catch (Exception e) {
// fail the overall backup and return
failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
@ -285,7 +281,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
convertWALsToHFiles();
incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
backupInfo.getBackupRootDir());
} catch (Exception e) {
String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId;
@ -298,8 +294,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
// After this checkpoint, even if entering cancel process, will let the backup finished
try {
// Set the previousTimestampMap which is before this current log roll to the manifest.
Map<TableName, Map<String, Long>> previousTimestampMap =
backupManager.readLogTimestampMap();
Map<TableName, Map<String, Long>> previousTimestampMap = backupManager.readLogTimestampMap();
backupInfo.setIncrTimestampMap(previousTimestampMap);
// The table list in backupInfo is good for both full backup and incremental backup.
@ -345,11 +340,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr);
if (res != 0) {
LOG.error("Copy incremental HFile files failed with return code: " + res + ".");
throw new IOException("Failed copy from " + StringUtils.join(files, ',')
+ " to " + backupDest);
throw new IOException(
"Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest);
}
LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',')
+ " to " + backupDest + " finished.");
LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest
+ " finished.");
} finally {
deleteBulkLoadDirectory();
}
@ -398,7 +393,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
// a Map task for each file. We use ';' as separator
// because WAL file names contains ','
String dirs = StringUtils.join(dirPaths, ';');
String jobname = "Incremental_Backup-" + backupId ;
String jobname = "Incremental_Backup-" + backupId;
Path bulkOutputPath = getBulkOutputDir();
conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
@ -410,7 +405,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
try {
player.setConf(conf);
int result = player.run(playerArgs);
if(result != 0) {
if (result != 0) {
throw new IOException("WAL Player failed");
}
conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
@ -25,7 +24,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.TreeSet;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@ -47,7 +45,6 @@ import org.slf4j.LoggerFactory;
/**
* Restore table implementation
*
*/
@InterfaceAudience.Private
public class RestoreTablesClient {
@ -76,7 +73,6 @@ public class RestoreTablesClient {
/**
* Validate target tables.
*
* @param tTableArray target tables
* @param isOverwrite overwrite existing table
* @throws IOException exception
@ -102,12 +98,11 @@ public class RestoreTablesClient {
if (existTableList.size() > 0) {
if (!isOverwrite) {
LOG.error("Existing table (" + existTableList
+ ") found in the restore target, please add "
LOG.error("Existing table (" + existTableList + ") found in the restore target, please add "
+ "\"-o\" as overwrite option in the command if you mean"
+ " to restore to these existing tables");
throw new IOException("Existing table found in target while no \"-o\" "
+ "as overwrite option found");
throw new IOException(
"Existing table found in target while no \"-o\" " + "as overwrite option found");
} else {
if (disabledTableList.size() > 0) {
LOG.error("Found offline table in the restore target, "
@ -122,7 +117,6 @@ public class RestoreTablesClient {
/**
* Restore operation handle each backupImage in array.
*
* @param images array BackupImage
* @param sTable table to be restored
* @param tTable table to be restored to
@ -229,8 +223,7 @@ public class RestoreTablesClient {
LOG.info("Restore includes the following image(s):");
for (BackupImage image : restoreImageSet) {
LOG.info("Backup: " + image.getBackupId() + " "
+ HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
table));
+ HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -44,10 +44,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Base class for backup operation. Concrete implementation for
* full and incremental backup are delegated to corresponding sub-classes:
* {@link FullTableBackupClient} and {@link IncrementalTableBackupClient}
*
* Base class for backup operation. Concrete implementation for full and incremental backup are
* delegated to corresponding sub-classes: {@link FullTableBackupClient} and
* {@link IncrementalTableBackupClient}
*/
@InterfaceAudience.Private
public abstract class TableBackupClient {
@ -88,8 +87,7 @@ public abstract class TableBackupClient {
this.conn = conn;
this.conf = conn.getConfiguration();
this.fs = CommonFSUtils.getCurrentFileSystem(conf);
backupInfo =
backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
backupInfo = backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
if (tableList == null || tableList.isEmpty()) {
this.tableList = new ArrayList<>(backupInfo.getTables());
@ -159,9 +157,8 @@ public abstract class TableBackupClient {
*/
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf);
Path stagingDir =
new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
.toString()));
Path stagingDir = new Path(
conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory().toString()));
FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir);
if (files == null) {
return;
@ -177,30 +174,29 @@ public abstract class TableBackupClient {
}
/**
* Clean up the uncompleted data at target directory if the ongoing backup has already entered
* the copy phase.
* Clean up the uncompleted data at target directory if the ongoing backup has already entered the
* copy phase.
*/
protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
try {
// clean up the uncompleted data at target directory if the ongoing backup has already entered
// the copy phase
LOG.debug("Trying to cleanup up target dir. Current backup phase: "
+ backupInfo.getPhase());
if (backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
LOG.debug("Trying to cleanup up target dir. Current backup phase: " + backupInfo.getPhase());
if (
backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
|| backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
|| backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
FileSystem outputFs =
FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
|| backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)
) {
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
// now treat one backup as a transaction, clean up data that has been partially copied at
// table level
for (TableName table : backupInfo.getTables()) {
Path targetDirPath =
new Path(HBackupFileSystem.getTableBackupDir(backupInfo.getBackupRootDir(),
backupInfo.getBackupId(), table));
Path targetDirPath = new Path(HBackupFileSystem
.getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) {
LOG.debug("Cleaning up uncompleted backup data at " + targetDirPath.toString()
+ " done.");
LOG.debug(
"Cleaning up uncompleted backup data at " + targetDirPath.toString() + " done.");
} else {
LOG.debug("No data has been copied to " + targetDirPath.toString() + ".");
}
@ -238,10 +234,9 @@ public abstract class TableBackupClient {
// set overall backup status: failed
backupInfo.setState(BackupState.FAILED);
// compose the backup failed data
String backupFailedData =
"BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs()
+ ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + backupInfo.getPhase()
+ ",failedmessage=" + backupInfo.getFailedMsg();
String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
+ backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
LOG.error(backupFailedData);
cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
// If backup session is updated to FAILED state - means we
@ -376,9 +371,8 @@ public abstract class TableBackupClient {
// compose the backup complete data
String backupCompleteData =
obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs()
+ ",completets=" + backupInfo.getCompleteTs() + ",bytescopied="
+ backupInfo.getTotalBytesCopied();
obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() + ",completets="
+ backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getTotalBytesCopied();
if (LOG.isDebugEnabled()) {
LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData);
}
@ -404,23 +398,26 @@ public abstract class TableBackupClient {
/**
* Backup request execution.
*
* @throws IOException if the execution of the backup fails
*/
public abstract void execute() throws IOException;
protected Stage getTestStage() {
return Stage.valueOf("stage_"+ conf.getInt(BACKUP_TEST_MODE_STAGE, 0));
return Stage.valueOf("stage_" + conf.getInt(BACKUP_TEST_MODE_STAGE, 0));
}
protected void failStageIf(Stage stage) throws IOException {
Stage current = getTestStage();
if (current == stage) {
throw new IOException("Failed stage " + stage+" in testing");
throw new IOException("Failed stage " + stage + " in testing");
}
}
public enum Stage {
stage_0, stage_1, stage_2, stage_3, stage_4
stage_0,
stage_1,
stage_2,
stage_3,
stage_4
}
}
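For readers skimming the TableBackupClient hunks above, here is a minimal, self-contained sketch of the stage-based failure injection they touch (getTestStage/failStageIf and the Stage enum). The class and the configuration key string below are illustrative placeholders, not part of this commit; only the pattern mirrors the code in the diff.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
// Sketch only: stage-based failure injection in the spirit of TableBackupClient's test mode.
public class StageFailureSketch {
  // Placeholder key; the real constant is TableBackupClient.BACKUP_TEST_MODE_STAGE.
  static final String TEST_STAGE_KEY = "example.backup.test.mode.stage";
  public enum Stage {
    stage_0,
    stage_1,
    stage_2,
    stage_3,
    stage_4
  }
  static Stage getTestStage(Configuration conf) {
    // Same construction as in the diff: "stage_" + configured int, default 0.
    return Stage.valueOf("stage_" + conf.getInt(TEST_STAGE_KEY, 0));
  }
  static void failStageIf(Configuration conf, Stage stage) throws IOException {
    // Throw only when the configured stage matches, so a test can abort a backup mid-flight.
    if (getTestStage(conf) == stage) {
      throw new IOException("Failed stage " + stage + " in testing");
    }
  }
}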

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -25,7 +25,6 @@ import java.math.BigDecimal;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -130,8 +129,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
* @param bytesCopied bytes copied
* @throws NoNodeException exception
*/
static void updateProgress(BackupInfo backupInfo, BackupManager backupManager,
int newProgress, long bytesCopied) throws IOException {
static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress,
long bytesCopied) throws IOException {
// compose the new backup progress data, using fake number for now
String backupProgressData = newProgress + "%";
@ -142,12 +141,10 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
}
/**
* Extends DistCp for progress updating to backup system table
* during backup. Using DistCpV2 (MAPREDUCE-2765).
* Simply extend it and override execute() method to get the
* Job reference for progress updating.
* Only the argument "src1, [src2, [...]] dst" is supported,
* no more DistCp options.
* Extends DistCp for progress updating to backup system table during backup. Using DistCpV2
* (MAPREDUCE-2765). Simply extend it and override execute() method to get the Job reference for
* progress updating. Only the argument "src1, [src2, [...]] dst" is supported, no more DistCp
* options.
*/
class BackupDistCp extends DistCp {
@ -162,8 +159,6 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
this.backupManager = backupManager;
}
@Override
public Job execute() throws Exception {
@ -188,16 +183,14 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
long totalSrcLgth = 0;
for (Path aSrc : srcs) {
totalSrcLgth +=
BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
totalSrcLgth += BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
}
// Async call
job = super.execute();
// Update the copy progress to system table every 0.5s if progress value changed
int progressReportFreq =
MapReduceBackupCopyJob.this.getConf().getInt("hbase.backup.progressreport.frequency",
500);
int progressReportFreq = MapReduceBackupCopyJob.this.getConf()
.getInt("hbase.backup.progressreport.frequency", 500);
float lastProgress = progressDone;
while (!job.isComplete()) {
float newProgress =
@ -241,8 +234,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
String jobID = job.getJobID().toString();
job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " "
+ job.isSuccessful());
LOG.debug(
"DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " + job.isSuccessful());
Counters ctrs = job.getCounters();
LOG.debug(Objects.toString(ctrs));
if (job.isComplete() && !job.isSuccessful()) {
@ -252,11 +245,11 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
return job;
}
private Field getInputOptionsField(Class<?> classDistCp) throws IOException{
private Field getInputOptionsField(Class<?> classDistCp) throws IOException {
Field f = null;
try {
f = classDistCp.getDeclaredField("inputOptions");
} catch(Exception e) {
} catch (Exception e) {
// Hadoop 3
try {
f = classDistCp.getDeclaredField("context");
@ -268,7 +261,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
}
@SuppressWarnings("unchecked")
private List<Path> getSourcePaths(Field fieldInputOptions) throws IOException{
private List<Path> getSourcePaths(Field fieldInputOptions) throws IOException {
Object options;
try {
options = fieldInputOptions.get(this);
@ -282,9 +275,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
return (List<Path>) methodGetSourcePaths.invoke(options);
}
} catch (IllegalArgumentException | IllegalAccessException |
ClassNotFoundException | NoSuchMethodException |
SecurityException | InvocationTargetException e) {
} catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException
| NoSuchMethodException | SecurityException | InvocationTargetException e) {
throw new IOException(e);
}
@ -352,8 +344,6 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
return getSourcePaths(options);
}
private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException {
FileSystem fs = pathToListFile.getFileSystem(conf);
fs.delete(pathToListFile, false);
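The BackupDistCp hunks above revolve around polling a submitted MapReduce job and pushing coarse progress to the backup system table. A hedged sketch of that polling loop, assuming a plain Job handle and logging to stdout instead of the system table:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
// Sketch only: report map-phase progress of a running job on a configurable interval.
public class CopyProgressSketch {
  static void reportProgress(Job job, Configuration conf)
    throws IOException, InterruptedException {
    // Same knob the diff reads: poll every 500 ms unless overridden.
    int freq = conf.getInt("hbase.backup.progressreport.frequency", 500);
    float last = -1f;
    while (!job.isComplete()) {
      float current = job.mapProgress();
      if (current > last) {
        // BackupDistCp writes this value to the backup system table via updateProgress.
        System.out.println("copy progress: " + (int) (current * 100) + "%");
        last = current;
      }
      Thread.sleep(freq);
    }
  }
}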

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup.mapreduce;
import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@ -52,9 +53,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* MapReduce implementation of {@link BackupMergeJob}
* Must be initialized with configuration of a backup destination cluster
*
* MapReduce implementation of {@link BackupMergeJob} Must be initialized with configuration of a
* backup destination cluster
*/
@InterfaceAudience.Private
public class MapReduceBackupMergeJob implements BackupMergeJob {
@ -119,9 +119,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
String dirs = StringUtils.join(dirPaths, ",");
Path bulkOutputPath =
BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]),
getConf(), false);
Path bulkOutputPath = BackupUtils.getBulkOutputDir(
BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
// Delete content if exists
if (fs.exists(bulkOutputPath)) {
if (!fs.delete(bulkOutputPath, true)) {
@ -149,14 +148,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
// PHASE 2 (modification of a backup file system)
// Move existing mergedBackupId data into tmp directory
// we will need it later in case of a failure
Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot,
mergedBackupId);
Path tmpBackupDir =
HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId);
Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId);
if (!fs.rename(backupDirPath, tmpBackupDir)) {
throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir);
throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir);
} else {
LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir);
LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir);
}
// Move new data into backup dest
for (Pair<TableName, Path> tn : processedTableList) {
@ -170,7 +169,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
// Delete tmp dir (Rename back during repair)
if (!fs.delete(tmpBackupDir, true)) {
// WARN and ignore
LOG.warn("Could not delete tmp dir: "+ tmpBackupDir);
LOG.warn("Could not delete tmp dir: " + tmpBackupDir);
}
// Delete old data
deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
@ -193,8 +192,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} else {
// backup repair must be run
throw new IOException(
"Backup merge operation failed, run backup repair tool to restore system's integrity",
e);
"Backup merge operation failed, run backup repair tool to restore system's integrity", e);
}
} finally {
table.close();
@ -220,8 +218,10 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
}
// Keep meta
String fileName = p.toString();
if (fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0
|| fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0) {
if (
fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0
|| fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0
) {
toKeep.add(p);
}
}
@ -249,7 +249,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
}
}
/**
/**
* Converts path before copying
* @param p path
* @param backupDirPath backup root
@ -350,13 +350,13 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
Path newDst = new Path(dest, family);
if (fs.exists(newDst)) {
if (!fs.delete(newDst, true)) {
throw new IOException("failed to delete :"+ newDst);
throw new IOException("failed to delete :" + newDst);
}
} else {
fs.mkdirs(dest);
}
boolean result = fs.rename(fst.getPath(), dest);
LOG.debug("MoveData from "+ fst.getPath() +" to "+ dest+" result="+ result);
LOG.debug("MoveData from " + fst.getPath() + " to " + dest + " result=" + result);
}
}
}
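The "PHASE 2" section above depends on a rename-to-tmp-then-replace pattern so a failed merge can be repaired later. A simplified sketch of that filesystem transaction with generic Path handles (the backup-layout helpers such as HBackupFileSystem are left out):
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
// Sketch only: replace a backup directory in a way that keeps repair possible.
public class MergeReplaceSketch {
  static void replaceBackupDir(FileSystem fs, Path backupDir, Path tmpDir, Path newData)
    throws IOException {
    // Move the current image aside first; if anything below fails, tmpDir still holds
    // the old data and a repair step can rename it back.
    if (!fs.rename(backupDir, tmpDir)) {
      throw new IOException("Failed to rename " + backupDir + " to " + tmpDir);
    }
    if (!fs.rename(newData, backupDir)) {
      throw new IOException("Failed to install merged data at " + backupDir);
    }
    if (!fs.delete(tmpDir, true)) {
      // WARN and ignore, as the merge job does.
      System.err.println("Could not delete tmp dir: " + tmpDir);
    }
  }
}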

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.backup.mapreduce;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
@ -69,11 +68,9 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
}
/**
* A mapper that just writes out cells. This one can be used together with
* {@link CellSortReducer}
* A mapper that just writes out cells. This one can be used together with {@link CellSortReducer}
*/
static class HFileCellMapper extends
Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
static class HFileCellMapper extends Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
@Override
public void map(NullWritable key, Cell value, Context context)
@ -100,8 +97,7 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
String tabName = args[1];
conf.setStrings(TABLES_KEY, tabName);
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job =
Job.getInstance(conf,
Job job = Job.getInstance(conf,
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime()));
job.setJarByClass(MapReduceHFileSplitterJob.class);
job.setInputFormatClass(HFileInputFormat.class);
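The HFileCellMapper reformatted above is essentially an identity mapper that keys every cell by its row so CellSortReducer can regroup them. A minimal sketch of that idea; the real mapper wraps the cell before writing, which is omitted here:
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Mapper;
// Sketch only: pair each Cell with its row key for downstream sorting.
public class RowKeyedCellMapper
  extends Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
  @Override
  public void map(NullWritable key, Cell value, Context context)
    throws IOException, InterruptedException {
    context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), value);
  }
}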

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -34,13 +34,10 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* MapReduce implementation of {@link RestoreJob}
*
* For backup restore, it runs {@link MapReduceHFileSplitterJob} job and creates
* HFiles which are aligned with a region boundaries of a table being
* restored.
*
* The resulting HFiles then are loaded using HBase bulk load tool {@link BulkLoadHFiles}.
* MapReduce implementation of {@link RestoreJob} For backup restore, it runs
* {@link MapReduceHFileSplitterJob} job and creates HFiles which are aligned with a region
* boundaries of a table being restored. The resulting HFiles then are loaded using HBase bulk load
* tool {@link BulkLoadHFiles}.
*/
@InterfaceAudience.Private
public class MapReduceRestoreJob implements RestoreJob {
@ -74,15 +71,12 @@ public class MapReduceRestoreJob implements RestoreJob {
for (int i = 0; i < tableNames.length; i++) {
LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]);
Path bulkOutputPath =
BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]),
getConf());
Path bulkOutputPath = BackupUtils
.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
Configuration conf = getConf();
conf.set(bulkOutputConfKey, bulkOutputPath.toString());
String[] playerArgs = {
dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i]
.getNameAsString()
};
String[] playerArgs = { dirs,
fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() };
int result;
try {
@ -97,8 +91,8 @@ public class MapReduceRestoreJob implements RestoreJob {
}
if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) {
throw new IOException("Can not restore from backup directory " + dirs +
" (check Hadoop and HBase logs). Bulk loader returns null");
throw new IOException("Can not restore from backup directory " + dirs
+ " (check Hadoop and HBase logs). Bulk loader returns null");
}
} else {
throw new IOException("Can not restore from backup directory " + dirs
@ -107,8 +101,8 @@ public class MapReduceRestoreJob implements RestoreJob {
LOG.debug("Restore Job finished:" + result);
} catch (Exception e) {
LOG.error(e.toString(), e);
throw new IOException("Can not restore from backup directory " + dirs
+ " (check Hadoop and HBase logs) ", e);
throw new IOException(
"Can not restore from backup directory " + dirs + " (check Hadoop and HBase logs) ", e);
}
}
}
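Once MapReduceHFileSplitterJob has produced region-aligned HFiles, the restore job above hands them to the bulk loader. A short usage sketch of that final step with BulkLoadHFiles, assuming the output directory already holds the generated files:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
// Sketch only: load previously generated HFiles into the target table.
public class BulkLoadSketch {
  static void load(Configuration conf, TableName table, Path bulkOutputPath)
    throws IOException {
    BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
    if (loader.bulkLoad(table, bulkOutputPath).isEmpty()) {
      // Mirrors the check in the diff: an empty result means nothing was loaded.
      throw new IOException("Bulk load from " + bulkOutputPath + " loaded nothing");
    }
  }
}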

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -43,6 +42,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
@ -62,8 +62,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
@Override
public void init(Map<String, Object> params) {
MasterServices master = (MasterServices) MapUtils.getObject(params,
HMaster.MASTER);
MasterServices master = (MasterServices) MapUtils.getObject(params, HMaster.MASTER);
if (master != null) {
conn = master.getConnection();
if (getConf() == null) {
@ -79,7 +78,6 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
}
}
private Map<Address, Long> getServersToOldestBackupMapping(List<BackupInfo> backups)
throws IOException {
Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>();
@ -136,8 +134,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath()));
long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName());
if (!addressToLastBackupMap.containsKey(walServerAddress)
|| addressToLastBackupMap.get(walServerAddress) >= walTimestamp) {
if (
!addressToLastBackupMap.containsKey(walServerAddress)
|| addressToLastBackupMap.get(walServerAddress) >= walTimestamp
) {
filteredFiles.add(file);
}
} catch (Exception ex) {
@ -147,8 +147,8 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
}
}
LOG
.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), filteredFiles.size());
LOG.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files),
filteredFiles.size());
return filteredFiles;
}
@ -156,8 +156,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
public void setConf(Configuration config) {
// If backup is disabled, keep all members null
super.setConf(config);
if (!config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) {
if (
!config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
) {
LOG.warn("Backup is disabled - allowing all wals to be deleted");
}
}
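The BackupLogCleaner condition reformatted above decides which WALs are safe to delete by comparing each file's timestamp with the last backup recorded for its server. A simplified, self-contained mirror of that decision with plain types (Address and AbstractFSWALProvider are replaced by a String key and a long):
import java.util.Map;
// Sketch only: a WAL is deletable when its server has no pending-backup entry,
// or the recorded backup timestamp already covers the WAL.
public class WalCleanerSketch {
  static boolean isDeletable(Map<String, Long> lastBackupTsByServer, String serverAddress,
    long walTimestamp) {
    Long backupTs = lastBackupTsByServer.get(serverAddress);
    return backupTs == null || backupTs >= walTimestamp;
  }
}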

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,14 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.master;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
@ -89,13 +87,11 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
// setup the default procedure coordinator
String name = master.getServerName().toString();
// get the configuration for the coordinator
Configuration conf = master.getConfiguration();
long wakeFrequency = conf.getInt(BACKUP_WAKE_MILLIS_KEY, BACKUP_WAKE_MILLIS_DEFAULT);
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY,BACKUP_TIMEOUT_MILLIS_DEFAULT);
int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY,
BACKUP_POOL_THREAD_NUMBER_DEFAULT);
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, BACKUP_POOL_THREAD_NUMBER_DEFAULT);
// setup the default procedure coordinator
ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, opThreads);

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.regionserver;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.Callable;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.client.Connection;
@ -111,8 +109,8 @@ public class LogRollBackupSubprocedure extends Subprocedure {
String server = host + ":" + port;
Long sts = serverTimestampMap.get(host);
if (sts != null && sts > highest) {
LOG.warn("Won't update server's last roll log result: current=" + sts + " new="
+ highest);
LOG
.warn("Won't update server's last roll log result: current=" + sts + " new=" + highest);
return null;
}
// write the log number to backup system table.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.regionserver;
import java.io.Closeable;
@ -28,19 +27,18 @@ import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Handle running each of the individual tasks for completing a backup procedure on a region
* server.
* Handle running each of the individual tasks for completing a backup procedure on a region server.
*/
@InterfaceAudience.Private
public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
@ -58,8 +56,7 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
public LogRollBackupSubprocedurePool(String name, Configuration conf) {
// configure the executor service
long keepAlive =
conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
long keepAlive = conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS);
this.name = name;
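The LogRollBackupSubprocedurePool constructor above wires a keep-alive taken from the backup timeout setting into a small executor. A hedged sketch of such a pool using only java.util.concurrent; the core size of 1 and the daemon thread factory are assumptions for illustration, not taken from this diff:
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
// Sketch only: bounded pool whose keep-alive comes from a configured timeout.
public class BackupTaskPoolSketch {
  static ThreadPoolExecutor newPool(int threads, long keepAliveMillis, String name) {
    // keepAliveMillis should be positive for the keep-alive to take effect.
    return new ThreadPoolExecutor(1, threads, keepAliveMillis, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>(), r -> {
        Thread t = new Thread(r, name + "-backup-pool");
        t.setDaemon(true);
        return t;
      });
  }
}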

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.regionserver;
import java.io.IOException;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.impl.BackupManager;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.backup.util;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.util;
import java.io.FileNotFoundException;
@ -83,8 +82,8 @@ public final class BackupUtils {
* @param rsLogTimestampMap timestamp map
* @return the min timestamp of each RS
*/
public static Map<String, Long> getRSLogTimestampMins(
Map<TableName, Map<String, Long>> rsLogTimestampMap) {
public static Map<String, Long>
getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
return null;
}
@ -140,8 +139,8 @@ public final class BackupUtils {
FSTableDescriptors descriptors =
new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
descriptors.createTableDescriptorForTableDirectory(target, orig, false);
LOG.debug("Attempting to copy table info for:" + table + " target: " + target +
" descriptor: " + orig);
LOG.debug("Attempting to copy table info for:" + table + " target: " + target
+ " descriptor: " + orig);
LOG.debug("Finished copying tableinfo.");
List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
// For each region, write the region info to disk
@ -325,8 +324,7 @@ public final class BackupUtils {
String newMsg = null;
if (expMsg.contains("No FileSystem for scheme")) {
newMsg =
"Unsupported filesystem scheme found in the backup target url. Error Message: "
+ expMsg;
"Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
LOG.error(newMsg);
throw new IOException(newMsg);
} else {
@ -449,9 +447,8 @@ public final class BackupUtils {
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
for (TableName table : backupInfo.getTables()) {
Path targetDirPath =
new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(),
table));
Path targetDirPath = new Path(
getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) {
LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
} else {
@ -535,8 +532,8 @@ public final class BackupUtils {
}
/**
* Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the
* 'path' component of a Path's URI: e.g. If a Path is
* Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
* component of a Path's URI: e.g. If a Path is
* <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
* <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
* out a Path without qualifying Filesystem instance.
@ -693,10 +690,9 @@ public final class BackupUtils {
public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
throws IOException {
FileSystem fs = FileSystem.get(conf);
String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
fs.getHomeDirectory() + "/hbase-staging");
Path path =
new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
String tmp =
conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
+ EnvironmentEdgeManager.currentTime());
if (deleteOnExit) {
fs.deleteOnExit(path);
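The javadoc above about returning the 'path' component of a Hadoop Path boils down to a single URI call. A tiny sketch using the example from that comment; the class name is just a placeholder:
import org.apache.hadoop.fs.Path;
// Sketch only: strip scheme and authority from a Path, keeping the path component.
public class PathComponentSketch {
  public static void main(String[] args) {
    Path p = new Path("hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir");
    // Prints "/hbase_trunk/TestTable/compaction.dir".
    System.out.println(p.toUri().getPath());
  }
}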

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.util;
import java.io.FileNotFoundException;
@ -206,8 +205,7 @@ public class RestoreTool {
}
public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName,
TableName newTableName, boolean truncateIfExists, String lastIncrBackupId)
throws IOException {
TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists,
lastIncrBackupId);
}
@ -228,9 +226,8 @@ public class RestoreTool {
/**
* Returns value represent path for:
* ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/
* snapshot_1396650097621_namespace_table"
* this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo,
* .data.manifest (trunk)
* snapshot_1396650097621_namespace_table" this path contains .snapshotinfo, .tabledesc (0.96 and
* 0.98) this path contains .snapshotinfo, .data.manifest (trunk)
* @param tableName table name
* @return path to table info
* @throws IOException exception
@ -265,8 +262,8 @@ public class RestoreTool {
if (!tableDescriptor.getTableName().equals(tableName)) {
LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+ tableInfoPath.toString());
LOG.error("tableDescriptor.getNameAsString() = "
+ tableDescriptor.getTableName().getNameAsString());
LOG.error(
"tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
+ " under tableInfoPath: " + tableInfoPath.toString());
}
@ -277,8 +274,7 @@ public class RestoreTool {
String lastIncrBackupId) throws IOException {
if (lastIncrBackupId != null) {
String target =
BackupUtils.getTableBackupDir(backupRootPath.toString(),
lastIncrBackupId, tableName);
BackupUtils.getTableBackupDir(backupRootPath.toString(), lastIncrBackupId, tableName);
return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target));
}
return null;
@ -315,8 +311,8 @@ public class RestoreTool {
LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
}
} else {
throw new IOException("Table snapshot directory: " +
tableSnapshotPath + " does not exist.");
throw new IOException(
"Table snapshot directory: " + tableSnapshotPath + " does not exist.");
}
}
@ -333,8 +329,8 @@ public class RestoreTool {
truncateIfExists);
return;
} else {
throw new IllegalStateException("Cannot restore hbase table because directory '"
+ " tableArchivePath is null.");
throw new IllegalStateException(
"Cannot restore hbase table because directory '" + " tableArchivePath is null.");
}
}
@ -356,7 +352,8 @@ public class RestoreTool {
RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
Path[] paths = new Path[regionPathList.size()];
regionPathList.toArray(paths);
restoreService.run(paths, new TableName[]{tableName}, new TableName[] {newTableName}, true);
restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName },
true);
} catch (Exception e) {
LOG.error(e.toString(), e);
@ -430,9 +427,11 @@ public class RestoreTool {
// start to parse hfile inside one family dir
Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
for (Path hfile : hfiles) {
if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
if (
hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
|| StoreFileInfo.isReference(hfile.getName())
|| HFileLink.isHFileLink(hfile.getName())) {
|| HFileLink.isHFileLink(hfile.getName())
) {
continue;
}
HFile.Reader reader = HFile.createReader(fs, hfile, conf);
@ -476,8 +475,8 @@ public class RestoreTool {
boolean createNew = false;
if (admin.tableExists(targetTableName)) {
if (truncateIfExists) {
LOG.info("Truncating exising target table '" + targetTableName
+ "', preserving region splits");
LOG.info(
"Truncating exising target table '" + targetTableName + "', preserving region splits");
admin.disableTable(targetTableName);
admin.truncateTable(targetTableName, true);
} else {
@ -497,7 +496,7 @@ public class RestoreTool {
// create table using table descriptor and region boundaries
admin.createTable(htd, keys);
}
} catch (NamespaceNotFoundException e){
} catch (NamespaceNotFoundException e) {
LOG.warn("There was no namespace and the same will be created");
String namespaceAsString = targetTableName.getNamespaceAsString();
LOG.info("Creating target namespace '" + namespaceAsString + "'");

View File

@ -1,5 +1,4 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -111,8 +110,8 @@ public class TestBackupBase {
public IncrementalTableBackupClientForTest() {
}
public IncrementalTableBackupClientForTest(Connection conn,
String backupId, BackupRequest request) throws IOException {
public IncrementalTableBackupClientForTest(Connection conn, String backupId,
BackupRequest request) throws IOException {
super(conn, backupId, request);
}
@ -133,7 +132,7 @@ public class TestBackupBase {
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
convertWALsToHFiles();
incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
backupInfo.getBackupRootDir());
failStageIf(Stage.stage_2);
@ -215,9 +214,8 @@ public class TestBackupBase {
// SNAPSHOT_TABLES:
backupInfo.setPhase(BackupPhase.SNAPSHOT);
for (TableName tableName : tableList) {
String snapshotName =
"snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
+ tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
+ "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
snapshotTable(admin, tableName, snapshotName);
backupInfo.setSnapshotName(tableName, snapshotName);
@ -242,8 +240,7 @@ public class TestBackupBase {
backupManager.readLogTimestampMap();
Long newStartCode =
BackupUtils.getMinValue(BackupUtils
.getRSLogTimestampMins(newTableSetTimestampMap));
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);
failStageIf(Stage.stage_4);
// backup complete
@ -251,7 +248,7 @@ public class TestBackupBase {
} catch (Exception e) {
if(autoRestoreOnFailure) {
if (autoRestoreOnFailure) {
failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
BackupType.FULL, conf);
}
@ -261,7 +258,7 @@ public class TestBackupBase {
}
public static void setUpHelper() throws Exception {
BACKUP_ROOT_DIR = Path.SEPARATOR +"backupUT";
BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
if (secure) {
@ -299,23 +296,21 @@ public class TestBackupBase {
TEST_UTIL.startMiniMapReduceCluster();
BACKUP_ROOT_DIR =
new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")),
BACKUP_ROOT_DIR).toString();
new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR)
.toString();
LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
if (useSecondCluster) {
BACKUP_REMOTE_ROOT_DIR =
new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS"))
+ BACKUP_REMOTE_ROOT_DIR).toString();
BACKUP_REMOTE_ROOT_DIR = new Path(
new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) + BACKUP_REMOTE_ROOT_DIR)
.toString();
LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
}
createTables();
populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
}
/**
* Setup Cluster with appropriate configurations before running tests.
*
* @throws Exception if starting the mini cluster or setting up the tables fails
*/
@BeforeClass
@ -327,7 +322,6 @@ public class TestBackupBase {
setUpHelper();
}
private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
Iterator<Entry<String, String>> it = masterConf.iterator();
while (it.hasNext()) {
@ -341,7 +335,7 @@ public class TestBackupBase {
*/
@AfterClass
public static void tearDown() throws Exception {
try{
try {
SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
} catch (Exception e) {
}
@ -367,12 +361,11 @@ public class TestBackupBase {
return t;
}
protected BackupRequest createBackupRequest(BackupType type,
List<TableName> tables, String path) {
protected BackupRequest createBackupRequest(BackupType type, List<TableName> tables,
String path) {
BackupRequest.Builder builder = new BackupRequest.Builder();
BackupRequest request = builder.withBackupType(type)
.withTableList(tables)
.withTargetRootDir(path).build();
BackupRequest request =
builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build();
return request;
}
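The reflowed createBackupRequest above is the builder pattern the test harness uses to describe a backup. A hedged usage sketch that also submits the request; BackupAdminImpl#backupTables is assumed from surrounding HBase code and is not part of this hunk:
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
// Sketch only: build a full-backup request for the given tables and submit it.
public class BackupRequestSketch {
  static String runFullBackup(Connection conn, List<TableName> tables, String rootDir)
    throws IOException {
    BackupRequest request = new BackupRequest.Builder().withBackupType(BackupType.FULL)
      .withTableList(tables).withTargetRootDir(rootDir).build();
    try (BackupAdmin admin = new BackupAdminImpl(conn)) {
      return admin.backupTables(request); // returns the new backup id
    }
  }
}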

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -41,7 +41,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/**
* Verify that full backup is created on a single empty table correctly.
*
* @throws Exception if doing the full backup fails
*/
@Test
@ -53,7 +52,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/**
* Verify that full backup is created on multiple empty tables correctly.
*
* @throws Exception if doing the full backup fails
*/
@Test
@ -66,7 +64,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/**
* Verify that full backup fails on a single table that does not exist.
*
* @throws Exception if doing the full backup fails
*/
@Test(expected = IOException.class)
@ -78,7 +75,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/**
* Verify that full backup fails on multiple tables that do not exist.
*
* @throws Exception if doing the full backup fails
*/
@Test(expected = IOException.class)
@ -90,7 +86,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/**
* Verify that full backup fails on tableset containing real and fake tables.
*
* @throws Exception if doing the full backup fails
*/
@Test(expected = IOException.class)

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -86,7 +86,6 @@ public class TestBackupCommandLineTool {
assertTrue(output.indexOf(USAGE_DESCRIBE) >= 0);
}
@Test
public void testBackupDriverCreateTopLevelBackupDest() throws Exception {
String[] args = new String[] { "create", "full", "hdfs://localhost:1020", "-t", "t1" };
@ -107,7 +106,6 @@ public class TestBackupCommandLineTool {
assertTrue(output.indexOf(USAGE_CREATE) >= 0);
assertTrue(output.indexOf(BackupRestoreConstants.OPTION_TABLE_LIST_DESC) > 0);
baos = new ByteArrayOutputStream();
System.setOut(new PrintStream(baos));
args = new String[] { "create", "-h" };
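The command-line test above feeds argument arrays like { "create", "full", "hdfs://localhost:1020", "-t", "t1" } to the backup tool. A hedged sketch of driving the same entry point programmatically; running BackupDriver through ToolRunner is an assumption drawn from how these tools are usually wired, not something shown in this hunk:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupDriver;
import org.apache.hadoop.util.ToolRunner;
// Sketch only: invoke the backup CLI entry point with the same args as the test.
public class BackupCliSketch {
  public static void main(String[] unused) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] args = new String[] { "create", "full", "hdfs://localhost:1020", "-t", "t1" };
    System.exit(ToolRunner.run(conf, new BackupDriver(), args));
  }
}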

Some files were not shown because too many files have changed in this diff.