HBASE-26899 Run spotless:apply

Closes #4312
Duo Zhang 2022-05-01 22:15:04 +08:00
parent 0edecbf9e0
commit 9c8c9e7fbf
4645 changed files with 110099 additions and 131240 deletions
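
Spotless is the code-formatter plugin used by the HBase Maven build; the tree-wide reformat below is the output of its apply goal rather than hand edits. A minimal sketch of the invocation, run from the repository root (any module or profile selection used for this particular run is an assumption, not recorded in the commit message):

    # rewrite all sources in place according to the project's Spotless rules
    mvn spotless:apply

    # verify formatting without modifying files (what CI typically runs)
    mvn spotless:check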

View File

@@ -106,5 +106,3 @@ else
 echo "No command specified" >&2
 exit 1
 fi

View File

@@ -33,4 +33,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:
 export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}"
 ulimit -n

View File

@@ -1,4 +1,4 @@
-<?xml version="1.0"?>
+<?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
 <!--
 /**
@@ -21,8 +21,8 @@
 -->
 <modelVersion>4.0.0</modelVersion>
 <parent>
-<artifactId>hbase</artifactId>
 <groupId>org.apache.hbase</groupId>
+<artifactId>hbase</artifactId>
 <version>3.0.0-alpha-3-SNAPSHOT</version>
 <relativePath>..</relativePath>
 </parent>

View File

@@ -15,13 +15,11 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to the client. This tests the hbase-client package and all of the client
 * tests in hbase-server.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to coprocessors.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as failing commonly on public build infrastructure.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,13 +15,11 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and
 * the like.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,23 +15,20 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as 'integration/system' test, meaning that the test class has the following
 * characteristics:
 * <ul>
-* <li> Possibly takes hours to complete</li>
-* <li> Can be run on a mini cluster or an actual cluster</li>
-* <li> Can make changes to the given cluster (starting stopping daemons, etc)</li>
-* <li> Should not be run in parallel of other integration tests</li>
+* <li>Possibly takes hours to complete</li>
+* <li>Can be run on a mini cluster or an actual cluster</li>
+* <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
+* <li>Should not be run in parallel of other integration tests</li>
 * </ul>
-*
-* Integration / System tests should have a class name starting with "IntegrationTest", and
-* should be annotated with @Category(IntegrationTests.class). Integration tests can be run
-* using the IntegrationTestsDriver class or from mvn verify.
-*
+* Integration / System tests should have a class name starting with "IntegrationTest", and should
+* be annotated with @Category(IntegrationTests.class). Integration tests can be run using the
+* IntegrationTestsDriver class or from mvn verify.
 * @see SmallTests
 * @see MediumTests
 * @see LargeTests

View File

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,21 +15,19 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tagging a test as 'large', means that the test class has the following characteristics:
 * <ul>
 * <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
-* same machine simultaneously so be careful two concurrent tests end up fighting over ports
-* or other singular resources).</li>
-* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it
-* has, will run in last less than three minutes</li>
+* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+* other singular resources).</li>
+* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it has,
+* will run in last less than three minutes</li>
 * <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
 * if you need to run tests longer than this.</li>
 * </ul>
-*
 * @see SmallTests
 * @see MediumTests
 * @see IntegrationTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to mapred or mapreduce.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to the master.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,21 +15,18 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tagging a test as 'medium' means that the test class has the following characteristics:
 * <ul>
-* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on
-* the same machine simultaneously so be careful two concurrent tests end up fighting over ports
-* or other singular resources).</li>
+* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on the
+* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+* other singular resources).</li>
 * <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
 * has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
 * </ul>
-*
-* Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster.
-*
+* Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster.
 * @see SmallTests
 * @see LargeTests
 * @see IntegrationTests

View File

@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as not easily falling into any of the below categories.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to RPC.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to the regionserver.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to replication.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to the REST capability of HBase.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,12 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to security.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.testclassification;
 /**
 * Tagging a test as 'small' means that the test class has the following characteristics:
 * <ul>
 * <li>it can be run simultaneously with other small tests all in the same JVM</li>
-* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test
-* methods it has, should take less than 15 seconds to complete</li>
+* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test methods
+* it has, should take less than 15 seconds to complete</li>
 * <li>it does not use a cluster</li>
 * </ul>
-*
 * @see MediumTests
 * @see LargeTests
 * @see IntegrationTests
 */
-public interface SmallTests {}
+public interface SmallTests {
+}

View File

@@ -15,13 +15,11 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build
 * infrastructure.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,13 +15,11 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**
 * Tag a test as region tests which takes longer than 5 minutes to run on public build
 * infrastructure.
-*
 * @see org.apache.hadoop.hbase.testclassification.ClientTests
 * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
 * @see org.apache.hadoop.hbase.testclassification.FilterTests

View File

@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package org.apache.hadoop.hbase.testclassification;
 /**

View File

@@ -1,6 +1,5 @@
-<?xml version="1.0"?>
-<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
 <!--
 /**
 * Licensed to the Apache Software Foundation (ASF) under one
@@ -23,8 +22,8 @@
 <modelVersion>4.0.0</modelVersion>
 <parent>
-<artifactId>hbase-archetypes</artifactId>
 <groupId>org.apache.hbase</groupId>
+<artifactId>hbase-archetypes</artifactId>
 <version>3.0.0-alpha-3-SNAPSHOT</version>
 <relativePath>..</relativePath>
 </parent>
@@ -58,10 +57,10 @@
 further using xml-maven-plugin for xslt transformation, below. -->
 <execution>
 <id>hbase-client__copy-src-to-build-archetype-subdir</id>
-<phase>generate-resources</phase>
 <goals>
 <goal>copy-resources</goal>
 </goals>
+<phase>generate-resources</phase>
 <configuration>
 <outputDirectory>/${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir}</outputDirectory>
 <resources>
@@ -76,29 +75,30 @@
 </execution>
 <execution>
 <id>hbase-client__copy-pom-to-temp-for-xslt-processing</id>
-<phase>generate-resources</phase>
 <goals>
 <goal>copy-resources</goal>
 </goals>
+<phase>generate-resources</phase>
 <configuration>
 <outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir}</outputDirectory>
 <resources>
 <resource>
 <directory>/${project.basedir}/../${hbase-client.dir}</directory>
-<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
+<filtering>true</filtering>
+<!-- filtering replaces ${project.version} with literal -->
 <includes>
 <include>pom.xml</include>
 </includes>
 </resource>
 </resources>
 </configuration>
 </execution>
 <execution>
 <id>hbase-shaded-client__copy-src-to-build-archetype-subdir</id>
-<phase>generate-resources</phase>
 <goals>
 <goal>copy-resources</goal>
 </goals>
+<phase>generate-resources</phase>
 <configuration>
 <outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir}</outputDirectory>
 <resources>
@@ -113,20 +113,21 @@
 </execution>
 <execution>
 <id>hbase-shaded-client__copy-pom-to-temp-for-xslt-processing</id>
-<phase>generate-resources</phase>
 <goals>
 <goal>copy-resources</goal>
 </goals>
+<phase>generate-resources</phase>
 <configuration>
 <outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir}</outputDirectory>
 <resources>
 <resource>
 <directory>/${project.basedir}/../${hbase-shaded-client.dir}</directory>
-<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
+<filtering>true</filtering>
+<!-- filtering replaces ${project.version} with literal -->
 <includes>
 <include>pom.xml</include>
 </includes>
 </resource>
 </resources>
 </configuration>
 </execution>
@@ -137,10 +138,10 @@
 using xml-maven-plugin for xslt transformation, below. -->
 <execution>
 <id>hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
-<phase>prepare-package</phase>
 <goals>
 <goal>copy-resources</goal>
 </goals>
+<phase>prepare-package</phase>
 <configuration>
 <outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir}</outputDirectory>
 <resources>
@@ -149,16 +150,16 @@
 <includes>
 <include>pom.xml</include>
 </includes>
 </resource>
 </resources>
 </configuration>
 </execution>
 <execution>
 <id>hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
-<phase>prepare-package</phase>
 <goals>
 <goal>copy-resources</goal>
 </goals>
+<phase>prepare-package</phase>
 <configuration>
 <outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir}</outputDirectory>
 <resources>
@@ -167,7 +168,7 @@
 <includes>
 <include>pom.xml</include>
 </includes>
 </resource>
 </resources>
 </configuration>
 </execution>
@@ -182,10 +183,10 @@
 <!-- xml-maven-plugin modifies each exemplar project's pom.xml file to convert to standalone project. -->
 <execution>
 <id>modify-exemplar-pom-files-via-xslt</id>
-<phase>process-resources</phase>
 <goals>
 <goal>transform</goal>
 </goals>
+<phase>process-resources</phase>
 <configuration>
 <transformationSets>
 <transformationSet>
@@ -212,10 +213,10 @@
 prevent warnings when project is generated from archetype. -->
 <execution>
 <id>modify-archetype-pom-files-via-xslt</id>
-<phase>package</phase>
 <goals>
 <goal>transform</goal>
 </goals>
+<phase>package</phase>
 <configuration>
 <transformationSets>
 <transformationSet>
@@ -242,32 +243,32 @@
 </plugin>
 <plugin>
 <artifactId>maven-antrun-plugin</artifactId>
 <executions>
 <!-- exec-maven-plugin executes chmod to make scripts executable -->
 <execution>
 <id>make-scripts-executable</id>
-<phase>process-resources</phase>
 <goals>
 <goal>run</goal>
 </goals>
+<phase>process-resources</phase>
 <configuration>
-<chmod file="${project.basedir}/createArchetypes.sh" perm="+x" />
-<chmod file="${project.basedir}/installArchetypes.sh" perm="+x" />
+<chmod file="${project.basedir}/createArchetypes.sh" perm="+x"/>
+<chmod file="${project.basedir}/installArchetypes.sh" perm="+x"/>
 </configuration>
 </execution>
 <!-- exec-maven-plugin executes script which invokes 'archetype:create-from-project'
 to derive archetypes from exemplar projects. -->
 <execution>
 <id>run-createArchetypes-script</id>
-<phase>compile</phase>
 <goals>
 <goal>run</goal>
 </goals>
+<phase>compile</phase>
 <configuration>
-<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
+<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
 <arg line="./createArchetypes.sh"/>
 </exec>
 </configuration>
 </execution>
 <!-- exec-maven-plugin executes script which invokes 'install' to install each
@@ -277,14 +278,14 @@
 which does test generation of a project based on the archetype. -->
 <execution>
 <id>run-installArchetypes-script</id>
-<phase>install</phase>
 <goals>
 <goal>run</goal>
 </goals>
+<phase>install</phase>
 <configuration>
-<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
+<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
 <arg line="./installArchetypes.sh"/>
 </exec>
 </configuration>
 </execution>
 </executions>

View File

@@ -1,8 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="https://maven.apache.org/POM/4.0.0"
-xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation=
-"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
 <!--
 /**
 * Licensed to the Apache Software Foundation (ASF) under one
@@ -24,8 +21,8 @@
 -->
 <modelVersion>4.0.0</modelVersion>
 <parent>
-<artifactId>hbase-archetypes</artifactId>
 <groupId>org.apache.hbase</groupId>
+<artifactId>hbase-archetypes</artifactId>
 <version>3.0.0-alpha-3-SNAPSHOT</version>
 <relativePath>..</relativePath>
 </parent>

View File

@@ -1,5 +1,4 @@
-/**
-*
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
-* Successful running of this application requires access to an active instance
-* of HBase. For install instructions for a standalone instance of HBase, please
-* refer to https://hbase.apache.org/book.html#quickstart
+* Successful running of this application requires access to an active instance of HBase. For
+* install instructions for a standalone instance of HBase, please refer to
+* https://hbase.apache.org/book.html#quickstart
 */
 public final class HelloHBase {
 protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
 static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
 static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
-static final byte[] MY_FIRST_COLUMN_QUALIFIER
-= Bytes.toBytes("myFirstColumn");
-static final byte[] MY_SECOND_COLUMN_QUALIFIER
-= Bytes.toBytes("mySecondColumn");
+static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
+static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
 static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
 // Private constructor included here to avoid checkstyle warnings
@@ -61,20 +58,20 @@ public final class HelloHBase {
 final boolean deleteAllAtEOJ = true;
 /**
-* ConnectionFactory#createConnection() automatically looks for
-* hbase-site.xml (HBase configuration parameters) on the system's
-* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
+* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
+* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
+* HBase via ZooKeeper.
 */
 try (Connection connection = ConnectionFactory.createConnection();
 Admin admin = connection.getAdmin()) {
 admin.getClusterMetrics(); // assure connection successfully established
-System.out.println("\n*** Hello HBase! -- Connection has been "
-+ "established via ZooKeeper!!\n");
+System.out
+.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
 createNamespaceAndTable(admin);
 System.out.println("Getting a Table object for [" + MY_TABLE_NAME
 + "] with which to perform CRUD operations in HBase.");
 try (Table table = connection.getTable(MY_TABLE_NAME)) {
 putRowToTable(table);
@@ -92,9 +89,8 @@ public final class HelloHBase {
 }
 /**
-* Invokes Admin#createNamespace and Admin#createTable to create a namespace
-* with a table that has one column-family.
-*
+* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
+* one column-family.
 * @param admin Standard Admin object
 * @throws IOException If IO problem encountered
 */
@@ -103,48 +99,38 @@ public final class HelloHBase {
 if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
 System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
-admin.createNamespace(NamespaceDescriptor
-.create(MY_NAMESPACE_NAME).build());
+admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
 }
 if (!admin.tableExists(MY_TABLE_NAME)) {
 System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
-+ "], with one Column Family ["
-+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
++ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
 TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
-.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME))
-.build();
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build();
 admin.createTable(desc);
 }
 }
 /**
-* Invokes Table#put to store a row (with two new columns created 'on the
-* fly') into the table.
-*
+* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
 * @param table Standard Table object (used for CRUD operations).
 * @throws IOException If IO problem encountered
 */
 static void putRowToTable(final Table table) throws IOException {
-table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
-MY_FIRST_COLUMN_QUALIFIER,
-Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
-MY_SECOND_COLUMN_QUALIFIER,
-Bytes.toBytes("World!")));
+table.put(new Put(MY_ROW_ID)
+.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
+.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
-System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
-+ "] was put into Table ["
-+ table.getName().getNameAsString() + "] in HBase;\n"
-+ " the row's two columns (created 'on the fly') are: ["
-+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
-+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
-+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
-+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
+System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
++ table.getName().getNameAsString() + "] in HBase;\n"
++ " the row's two columns (created 'on the fly') are: ["
++ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
++ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
++ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
 }
 /**
 * Invokes Table#get and prints out the contents of the retrieved row.
-*
 * @param table Standard Table object
 * @throws IOException If IO problem encountered
 */
@@ -152,38 +138,32 @@ public final class HelloHBase {
 Result row = table.get(new Get(MY_ROW_ID));
-System.out.println("Row [" + Bytes.toString(row.getRow())
-+ "] was retrieved from Table ["
-+ table.getName().getNameAsString()
-+ "] in HBase, with the following content:");
+System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
++ table.getName().getNameAsString() + "] in HBase, with the following content:");
-for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
-: row.getNoVersionMap().entrySet()) {
+for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
+.entrySet()) {
 String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
-System.out.println(" Columns in Column Family [" + columnFamilyName
-+ "]:");
+System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
-for (Entry<byte[], byte[]> columnNameAndValueMap
-: colFamilyEntry.getValue().entrySet()) {
+for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
 System.out.println(" Value of Column [" + columnFamilyName + ":"
 + Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
 + Bytes.toString(columnNameAndValueMap.getValue()));
 }
 }
 }
 /**
 * Checks to see whether a namespace exists.
-*
-* @param admin Standard Admin object
+* @param admin Standard Admin object
 * @param namespaceName Name of namespace
 * @return true If namespace exists
 * @throws IOException If IO problem encountered
 */
-static boolean namespaceExists(final Admin admin, final String namespaceName)
-throws IOException {
+static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
 try {
 admin.getNamespaceDescriptor(namespaceName);
 } catch (NamespaceNotFoundException e) {
@@ -194,28 +174,24 @@ public final class HelloHBase {
 /**
 * Invokes Table#delete to delete test data (i.e. the row)
-*
 * @param table Standard Table object
 * @throws IOException If IO problem is encountered
 */
 static void deleteRow(final Table table) throws IOException {
-System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
-+ "] from Table ["
-+ table.getName().getNameAsString() + "].");
+System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
++ table.getName().getNameAsString() + "].");
 table.delete(new Delete(MY_ROW_ID));
 }
 /**
-* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
-* disable/delete Table and delete Namespace.
-*
+* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
+* Table and delete Namespace.
 * @param admin Standard Admin object
 * @throws IOException If IO problem is encountered
 */
 static void deleteNamespaceAndTable(final Admin admin) throws IOException {
 if (admin.tableExists(MY_TABLE_NAME)) {
-System.out.println("Disabling/deleting Table ["
-+ MY_TABLE_NAME.getNameAsString() + "].");
+System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
 admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
 admin.deleteTable(MY_TABLE_NAME);
 }

View File

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -44,10 +44,9 @@ public class TestHelloHBase {
 @ClassRule
 public static final HBaseClassTestRule CLASS_RULE =
 HBaseClassTestRule.forClass(TestHelloHBase.class);
-private static final HBaseTestingUtil TEST_UTIL
-= new HBaseTestingUtil();
+private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
 @BeforeClass
 public static void beforeClass() throws Exception {
@@ -67,13 +66,11 @@ public class TestHelloHBase {
 Admin admin = TEST_UTIL.getAdmin();
 exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
-assertEquals("#namespaceExists failed: found nonexistent namespace.",
-false, exists);
+assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
 admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
 exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
-assertEquals("#namespaceExists failed: did NOT find existing namespace.",
-true, exists);
+assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
 admin.deleteNamespace(EXISTING_NAMESPACE);
 }
@@ -82,14 +79,11 @@ public class TestHelloHBase {
 Admin admin = TEST_UTIL.getAdmin();
 HelloHBase.createNamespaceAndTable(admin);
-boolean namespaceExists
-= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
-assertEquals("#createNamespaceAndTable failed to create namespace.",
-true, namespaceExists);
+boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
+assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
 boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
-assertEquals("#createNamespaceAndTable failed to create table.",
-true, tableExists);
+assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
 admin.disableTable(HelloHBase.MY_TABLE_NAME);
 admin.deleteTable(HelloHBase.MY_TABLE_NAME);
@@ -100,8 +94,7 @@ public class TestHelloHBase {
 public void testPutRowToTable() throws IOException {
 Admin admin = TEST_UTIL.getAdmin();
 admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
-Table table
-= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
+Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
 HelloHBase.putRowToTable(table);
 Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
@@ -115,13 +108,10 @@ public class TestHelloHBase {
 public void testDeleteRow() throws IOException {
 Admin admin = TEST_UTIL.getAdmin();
 admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
-Table table
-= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
+Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
-table.put(new Put(HelloHBase.MY_ROW_ID).
-addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
-HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
-Bytes.toBytes("xyz")));
+table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
+HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
 HelloHBase.deleteRow(table);
 Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
 assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());

View File

@@ -1,8 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="https://maven.apache.org/POM/4.0.0"
-xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation=
-"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
 <!--
 /**
 * Licensed to the Apache Software Foundation (ASF) under one
@@ -24,8 +21,8 @@
 -->
 <modelVersion>4.0.0</modelVersion>
 <parent>
-<artifactId>hbase-archetypes</artifactId>
 <groupId>org.apache.hbase</groupId>
+<artifactId>hbase-archetypes</artifactId>
 <version>3.0.0-alpha-3-SNAPSHOT</version>
 <relativePath>..</relativePath>
 </parent>
@@ -44,16 +41,16 @@
 <groupId>org.apache.hbase</groupId>
 <artifactId>hbase-testing-util</artifactId>
 <scope>test</scope>
 <exclusions>
 <exclusion>
 <groupId>javax.xml.bind</groupId>
 <artifactId>jaxb-api</artifactId>
 </exclusion>
 <exclusion>
 <groupId>javax.ws.rs</groupId>
 <artifactId>jsr311-api</artifactId>
 </exclusion>
 </exclusions>
 </dependency>
 <dependency>
 <groupId>org.apache.hbase</groupId>

View File

@@ -1,5 +1,4 @@
-/**
-*
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
-* Successful running of this application requires access to an active instance
-* of HBase. For install instructions for a standalone instance of HBase, please
-* refer to https://hbase.apache.org/book.html#quickstart
+* Successful running of this application requires access to an active instance of HBase. For
+* install instructions for a standalone instance of HBase, please refer to
+* https://hbase.apache.org/book.html#quickstart
 */
 public final class HelloHBase {
 protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
 static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
 static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
-static final byte[] MY_FIRST_COLUMN_QUALIFIER
-= Bytes.toBytes("myFirstColumn");
-static final byte[] MY_SECOND_COLUMN_QUALIFIER
-= Bytes.toBytes("mySecondColumn");
+static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
+static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
 static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
 // Private constructor included here to avoid checkstyle warnings
@@ -60,20 +57,20 @@ public final class HelloHBase {
 final boolean deleteAllAtEOJ = true;
 /**
-* ConnectionFactory#createConnection() automatically looks for
-* hbase-site.xml (HBase configuration parameters) on the system's
-* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
+* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
+* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
+* HBase via ZooKeeper.
 */
 try (Connection connection = ConnectionFactory.createConnection();
 Admin admin = connection.getAdmin()) {
 admin.getClusterMetrics(); // assure connection successfully established
-System.out.println("\n*** Hello HBase! -- Connection has been "
-+ "established via ZooKeeper!!\n");
+System.out
+.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
 createNamespaceAndTable(admin);
 System.out.println("Getting a Table object for [" + MY_TABLE_NAME
 + "] with which to perform CRUD operations in HBase.");
 try (Table table = connection.getTable(MY_TABLE_NAME)) {
 putRowToTable(table);
@@ -91,9 +88,8 @@ public final class HelloHBase {
 }
 /**
-* Invokes Admin#createNamespace and Admin#createTable to create a namespace
-* with a table that has one column-family.
-*
+* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
+* one column-family.
 * @param admin Standard Admin object
 * @throws IOException If IO problem encountered
 */
@@ -102,13 +98,11 @@ public final class HelloHBase {
 if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
 System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
-admin.createNamespace(NamespaceDescriptor
-.create(MY_NAMESPACE_NAME).build());
+admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
 }
 if (!admin.tableExists(MY_TABLE_NAME)) {
 System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
-+ "], with one Column Family ["
-+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
++ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
 admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
 .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build());
@@ -116,33 +110,26 @@ public final class HelloHBase {
 }
 /**
-* Invokes Table#put to store a row (with two new columns created 'on the
-* fly') into the table.
-*
+* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
 * @param table Standard Table object (used for CRUD operations).
 * @throws IOException If IO problem encountered
 */
 static void putRowToTable(final Table table) throws IOException {
-table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
-MY_FIRST_COLUMN_QUALIFIER,
-Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
-MY_SECOND_COLUMN_QUALIFIER,
-Bytes.toBytes("World!")));
+table.put(new Put(MY_ROW_ID)
+.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
+.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
-System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
-+ "] was put into Table ["
-+ table.getName().getNameAsString() + "] in HBase;\n"
-+ " the row's two columns (created 'on the fly') are: ["
-+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
-+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
-+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
-+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
+System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
++ table.getName().getNameAsString() + "] in HBase;\n"
++ " the row's two columns (created 'on the fly') are: ["
++ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
++ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
++ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
 }
 /**
 * Invokes Table#get and prints out the contents of the retrieved row.
-*
 * @param table Standard Table object
 * @throws IOException If IO problem encountered
 */
@@ -150,38 +137,32 @@ public final class HelloHBase {
 Result row = table.get(new Get(MY_ROW_ID));
-System.out.println("Row [" + Bytes.toString(row.getRow())
-+ "] was retrieved from Table ["
+System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
++ table.getName().getNameAsString() + "] in HBase, with the following content:");
+ table.getName().getNameAsString()
+ "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
: row.getNoVersionMap().entrySet()) { .entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println(" Columns in Column Family [" + columnFamilyName System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
+ "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
: colFamilyEntry.getValue().entrySet()) {
System.out.println(" Value of Column [" + columnFamilyName + ":" System.out.println(" Value of Column [" + columnFamilyName + ":"
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
+ Bytes.toString(columnNameAndValueMap.getValue())); + Bytes.toString(columnNameAndValueMap.getValue()));
} }
} }
} }
/** /**
* Checks to see whether a namespace exists. * Checks to see whether a namespace exists.
* * @param admin Standard Admin object
* @param admin Standard Admin object
* @param namespaceName Name of namespace * @param namespaceName Name of namespace
* @return true If namespace exists * @return true If namespace exists
* @throws IOException If IO problem encountered * @throws IOException If IO problem encountered
*/ */
static boolean namespaceExists(final Admin admin, final String namespaceName) static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
throws IOException {
try { try {
admin.getNamespaceDescriptor(namespaceName); admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) { } catch (NamespaceNotFoundException e) {
@ -192,28 +173,24 @@ public final class HelloHBase {
/** /**
* Invokes Table#delete to delete test data (i.e. the row) * Invokes Table#delete to delete test data (i.e. the row)
*
* @param table Standard Table object * @param table Standard Table object
* @throws IOException If IO problem is encountered * @throws IOException If IO problem is encountered
*/ */
static void deleteRow(final Table table) throws IOException { static void deleteRow(final Table table) throws IOException {
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
+ "] from Table [" + table.getName().getNameAsString() + "].");
+ table.getName().getNameAsString() + "].");
table.delete(new Delete(MY_ROW_ID)); table.delete(new Delete(MY_ROW_ID));
} }
/** /**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* disable/delete Table and delete Namespace. * Table and delete Namespace.
*
* @param admin Standard Admin object * @param admin Standard Admin object
* @throws IOException If IO problem is encountered * @throws IOException If IO problem is encountered
*/ */
static void deleteNamespaceAndTable(final Admin admin) throws IOException { static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) { if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Disabling/deleting Table [" System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
+ MY_TABLE_NAME.getNameAsString() + "].");
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME); admin.deleteTable(MY_TABLE_NAME);
} }
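The javadoc above describes the cleanup path (Admin#disableTable, Admin#deleteTable, Admin#deleteNamespace). The ordering matters: a table must be disabled before it can be deleted, and a namespace can only be dropped once it no longer holds tables. A standalone sketch of that sequence, with a hypothetical class and method name, follows.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class CleanupSketch {
  private CleanupSketch() {
  }

  // Order matters: a table must be disabled before it can be deleted, and a
  // namespace can only be dropped once it no longer contains any tables.
  static void dropTableAndNamespace(Admin admin, TableName table, String namespace)
    throws IOException {
    if (admin.tableExists(table)) {
      admin.disableTable(table);
      admin.deleteTable(table);
    }
    admin.deleteNamespace(namespace); // assumes the namespace is now empty
  }
}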


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -44,10 +44,9 @@ public class TestHelloHBase {
@ClassRule @ClassRule
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHelloHBase.class); HBaseClassTestRule.forClass(TestHelloHBase.class);
private static final HBaseTestingUtil TEST_UTIL private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
= new HBaseTestingUtil();
@BeforeClass @BeforeClass
public static void beforeClass() throws Exception { public static void beforeClass() throws Exception {
@ -67,13 +66,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin(); Admin admin = TEST_UTIL.getAdmin();
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
assertEquals("#namespaceExists failed: found nonexistent namespace.", assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
false, exists);
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
assertEquals("#namespaceExists failed: did NOT find existing namespace.", assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
true, exists);
admin.deleteNamespace(EXISTING_NAMESPACE); admin.deleteNamespace(EXISTING_NAMESPACE);
} }
@ -82,14 +79,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin(); Admin admin = TEST_UTIL.getAdmin();
HelloHBase.createNamespaceAndTable(admin); HelloHBase.createNamespaceAndTable(admin);
boolean namespaceExists boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
assertEquals("#createNamespaceAndTable failed to create namespace.",
true, namespaceExists);
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
assertEquals("#createNamespaceAndTable failed to create table.", assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
true, tableExists);
admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.disableTable(HelloHBase.MY_TABLE_NAME);
admin.deleteTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME);
@ -100,8 +94,7 @@ public class TestHelloHBase {
public void testPutRowToTable() throws IOException { public void testPutRowToTable() throws IOException {
Admin admin = TEST_UTIL.getAdmin(); Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
HelloHBase.putRowToTable(table); HelloHBase.putRowToTable(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
@ -115,13 +108,10 @@ public class TestHelloHBase {
public void testDeleteRow() throws IOException { public void testDeleteRow() throws IOException {
Admin admin = TEST_UTIL.getAdmin(); Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
table.put(new Put(HelloHBase.MY_ROW_ID). table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("xyz")));
HelloHBase.deleteRow(table); HelloHBase.deleteRow(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());


@ -1,6 +1,5 @@
<?xml version="1.0"?> <?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" <project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!-- <!--
/** /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
@ -22,8 +21,8 @@
--> -->
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<parent> <parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version> <version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath> <relativePath>../hbase-build-configuration</relativePath>
</parent> </parent>
@ -68,10 +67,10 @@
<artifactId>spotbugs-maven-plugin</artifactId> <artifactId>spotbugs-maven-plugin</artifactId>
<executions> <executions>
<execution> <execution>
<inherited>false</inherited>
<goals> <goals>
<goal>spotbugs</goal> <goal>spotbugs</goal>
</goals> </goals>
<inherited>false</inherited>
<configuration> <configuration>
<excludeFilterFile>${project.basedir}/../dev-support/spotbugs-exclude.xml</excludeFilterFile> <excludeFilterFile>${project.basedir}/../dev-support/spotbugs-exclude.xml</excludeFilterFile>
</configuration> </configuration>


@ -1,4 +1,4 @@
<?xml version="1.0"?> <?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> <project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!-- <!--
/** /**
@ -21,160 +21,18 @@
--> -->
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<parent> <parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version> <version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath> <relativePath>../hbase-build-configuration</relativePath>
</parent> </parent>
<artifactId>hbase-assembly</artifactId> <artifactId>hbase-assembly</artifactId>
<name>Apache HBase - Assembly</name>
<description>
Module that does project assembly and that is all that it does.
</description>
<packaging>pom</packaging> <packaging>pom</packaging>
<name>Apache HBase - Assembly</name>
<description>Module that does project assembly and that is all that it does.</description>
<properties> <properties>
<license.bundles.dependencies>true</license.bundles.dependencies> <license.bundles.dependencies>true</license.bundles.dependencies>
</properties> </properties>
<build>
<plugins>
<!-- licensing info from our dependencies -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-remote-resources-plugin</artifactId>
<executions>
<execution>
<id>aggregate-licenses</id>
<goals>
<goal>process</goal>
</goals>
<configuration>
<properties>
<copyright-end-year>${build.year}</copyright-end-year>
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
<bundled-vega>${license.bundles.vega}</bundled-vega>
<bundled-logo>${license.bundles.logo}</bundled-logo>
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
</properties>
<resourceBundles>
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
</resourceBundles>
<supplementalModelArtifacts>
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
</supplementalModelArtifacts>
<supplementalModels>
<supplementalModel>supplemental-models.xml</supplementalModel>
</supplementalModels>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<!--Else will use hbase-assembly as final name.-->
<finalName>hbase-${project.version}</finalName>
<skipAssembly>false</skipAssembly>
<appendAssemblyId>true</appendAssemblyId>
<tarLongFileMode>posix</tarLongFileMode>
<descriptors>
<descriptor>${assembly.file}</descriptor>
<descriptor>src/main/assembly/client.xml</descriptor>
</descriptors>
</configuration>
</plugin>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
<id>create-hbase-generated-classpath</id>
<phase>test</phase>
<goals>
<goal>build-classpath</goal>
</goals>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
<id>create-hbase-generated-classpath-jline</id>
<phase>test</phase>
<goals>
<goal>build-classpath</goal>
</goals>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
<includeArtifactIds>jline</includeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
<id>create-hbase-generated-classpath-jruby</id>
<phase>test</phase>
<goals>
<goal>build-classpath</goal>
</goals>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
<includeArtifactIds>jruby-complete</includeArtifactIds>
</configuration>
</execution>
<!--
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
If MASSEMBLY-382 is fixed we could do this in the assembly
Currently relies on env, bash, find, and cat.
-->
<execution>
<!-- put all of the NOTICE files out of our dependencies -->
<id>unpack-dependency-notices</id>
<phase>prepare-package</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<configuration>
<excludeTypes>pom</excludeTypes>
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>${exec.maven.version}</version>
<executions>
<execution>
<id>concat-NOTICE-files</id>
<phase>package</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>env</executable>
<arguments>
<argument>bash</argument>
<argument>-c</argument>
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`
</argument>
</arguments>
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
<workingDirectory>${project.build.directory}</workingDirectory>
</configuration>
</execution>
</executions>
</plugin>
<!-- /end building aggregation of NOTICE files -->
</plugins>
</build>
<dependencies> <dependencies>
<!-- client artifacts for downstream use --> <!-- client artifacts for downstream use -->
<dependency> <dependency>
@ -189,7 +47,7 @@
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-shaded-mapreduce</artifactId> <artifactId>hbase-shaded-mapreduce</artifactId>
</dependency> </dependency>
<!-- Intra-project dependencies --> <!-- Intra-project dependencies -->
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-it</artifactId> <artifactId>hbase-it</artifactId>
@ -254,25 +112,25 @@
<artifactId>hbase-external-blockcache</artifactId> <artifactId>hbase-external-blockcache</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId> <artifactId>hbase-testing-util</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-metrics-api</artifactId> <artifactId>hbase-metrics-api</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-metrics</artifactId> <artifactId>hbase-metrics</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId> <artifactId>hbase-protocol-shaded</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-resource-bundle</artifactId> <artifactId>hbase-resource-bundle</artifactId>
<optional>true</optional> <optional>true</optional>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.httpcomponents</groupId> <groupId>org.apache.httpcomponents</groupId>
@ -390,4 +248,143 @@
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<!-- licensing info from our dependencies -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-remote-resources-plugin</artifactId>
<executions>
<execution>
<id>aggregate-licenses</id>
<goals>
<goal>process</goal>
</goals>
<configuration>
<properties>
<copyright-end-year>${build.year}</copyright-end-year>
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
<bundled-vega>${license.bundles.vega}</bundled-vega>
<bundled-logo>${license.bundles.logo}</bundled-logo>
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
</properties>
<resourceBundles>
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
</resourceBundles>
<supplementalModelArtifacts>
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
</supplementalModelArtifacts>
<supplementalModels>
<supplementalModel>supplemental-models.xml</supplementalModel>
</supplementalModels>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<!--Else will use hbase-assembly as final name.-->
<finalName>hbase-${project.version}</finalName>
<skipAssembly>false</skipAssembly>
<appendAssemblyId>true</appendAssemblyId>
<tarLongFileMode>posix</tarLongFileMode>
<descriptors>
<descriptor>${assembly.file}</descriptor>
<descriptor>src/main/assembly/client.xml</descriptor>
</descriptors>
</configuration>
</plugin>
<plugin>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
<id>create-hbase-generated-classpath</id>
<goals>
<goal>build-classpath</goal>
</goals>
<phase>test</phase>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
<id>create-hbase-generated-classpath-jline</id>
<goals>
<goal>build-classpath</goal>
</goals>
<phase>test</phase>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
<includeArtifactIds>jline</includeArtifactIds>
</configuration>
</execution>
<execution>
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
<id>create-hbase-generated-classpath-jruby</id>
<goals>
<goal>build-classpath</goal>
</goals>
<phase>test</phase>
<configuration>
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
<includeArtifactIds>jruby-complete</includeArtifactIds>
</configuration>
</execution>
<!--
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
If MASSEMBLY-382 is fixed we could do this in the assembly
Currently relies on env, bash, find, and cat.
-->
<execution>
<!-- put all of the NOTICE files out of our dependencies -->
<id>unpack-dependency-notices</id>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<excludeTypes>pom</excludeTypes>
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>${exec.maven.version}</version>
<executions>
<execution>
<id>concat-NOTICE-files</id>
<goals>
<goal>exec</goal>
</goals>
<phase>package</phase>
<configuration>
<executable>env</executable>
<arguments>
<argument>bash</argument>
<argument>-c</argument>
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`</argument>
</arguments>
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
<workingDirectory>${project.build.directory}</workingDirectory>
</configuration>
</execution>
</executions>
</plugin>
<!-- /end building aggregation of NOTICE files -->
</plugins>
</build>
</project> </project>


@ -1,6 +1,5 @@
<?xml version="1.0"?> <?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" <project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!-- <!--
/** /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
@ -22,8 +21,8 @@
--> -->
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<parent> <parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version> <version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath> <relativePath>../hbase-build-configuration</relativePath>
</parent> </parent>
@ -31,33 +30,6 @@
<artifactId>hbase-asyncfs</artifactId> <artifactId>hbase-asyncfs</artifactId>
<name>Apache HBase - Asynchronous FileSystem</name> <name>Apache HBase - Asynchronous FileSystem</name>
<description>HBase Asynchronous FileSystem Implementation for WAL</description> <description>HBase Asynchronous FileSystem Implementation for WAL</description>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
</plugins>
</build>
<dependencies> <dependencies>
<dependency> <dependency>
@ -169,13 +141,42 @@
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
</plugins>
</build>
<profiles> <profiles>
<!-- Profiles for building against different hadoop versions --> <!-- Profiles for building against different hadoop versions -->
<profile> <profile>
<id>hadoop-3.0</id> <id>hadoop-3.0</id>
<activation> <activation>
<property><name>!hadoop.profile</name></property> <property>
<name>!hadoop.profile</name>
</property>
</activation> </activation>
<dependencies> <dependencies>
<dependency> <dependency>
@ -224,8 +225,7 @@
<artifactId>lifecycle-mapping</artifactId> <artifactId>lifecycle-mapping</artifactId>
<configuration> <configuration>
<lifecycleMappingMetadata> <lifecycleMappingMetadata>
<pluginExecutions> <pluginExecutions/>
</pluginExecutions>
</lifecycleMappingMetadata> </lifecycleMappingMetadata>
</configuration> </configuration>
</plugin> </plugin>


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -21,10 +21,9 @@ import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.yetus.audience.InterfaceAudience;
/** /**
* Interface for asynchronous filesystem output stream. * Interface for asynchronous filesystem output stream.


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -47,9 +47,9 @@ public final class AsyncFSOutputHelper {
* implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}. * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
*/ */
public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite,
boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup,
Class<? extends Channel> channelClass, StreamSlowMonitor monitor) Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
throws IOException, CommonFSUtils.StreamLacksCapabilityException { throws IOException, CommonFSUtils.StreamLacksCapabilityException {
if (fs instanceof DistributedFileSystem) { if (fs instanceof DistributedFileSystem) {
return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f,
overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor); overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor);


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -180,7 +180,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
// State for connections to DN // State for connections to DN
private enum State { private enum State {
STREAMING, CLOSING, BROKEN, CLOSED STREAMING,
CLOSING,
BROKEN,
CLOSED
} }
private volatile State state; private volatile State state;
@ -196,7 +199,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
if (c.unfinishedReplicas.remove(channel.id())) { if (c.unfinishedReplicas.remove(channel.id())) {
long current = EnvironmentEdgeManager.currentTime(); long current = EnvironmentEdgeManager.currentTime();
streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen, streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen,
current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size());
c.lastAckTimestamp = current; c.lastAckTimestamp = current;
if (c.unfinishedReplicas.isEmpty()) { if (c.unfinishedReplicas.isEmpty()) {
// we need to remove first before complete the future. It is possible that after we // we need to remove first before complete the future. It is possible that after we
@ -284,13 +287,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception { protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
Status reply = getStatus(ack); Status reply = getStatus(ack);
if (reply != Status.SUCCESS) { if (reply != Status.SUCCESS) {
failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block
block + " from datanode " + ctx.channel().remoteAddress())); + " from datanode " + ctx.channel().remoteAddress()));
return; return;
} }
if (PipelineAck.isRestartOOBStatus(reply)) { if (PipelineAck.isRestartOOBStatus(reply)) {
failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block "
block + " from datanode " + ctx.channel().remoteAddress())); + block + " from datanode " + ctx.channel().remoteAddress()));
return; return;
} }
if (ack.getSeqno() == HEART_BEAT_SEQNO) { if (ack.getSeqno() == HEART_BEAT_SEQNO) {
@ -345,10 +348,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
} }
} }
FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs, FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client,
DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId, ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock,
LocatedBlock locatedBlock, Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap, Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap, DataChecksum summer,
DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
this.conf = conf; this.conf = conf;
this.dfs = dfs; this.dfs = dfs;
this.client = client; this.client = client;
@ -403,7 +406,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
} }
private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf, private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
long nextPacketOffsetInBlock, boolean syncBlock) { long nextPacketOffsetInBlock, boolean syncBlock) {
int dataLen = dataBuf.readableBytes(); int dataLen = dataBuf.readableBytes();
int chunkLen = summer.getBytesPerChecksum(); int chunkLen = summer.getBytesPerChecksum();
int trailingPartialChunkLen = dataLen % chunkLen; int trailingPartialChunkLen = dataLen % chunkLen;
@ -413,13 +416,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen)); summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
checksumBuf.writerIndex(checksumLen); checksumBuf.writerIndex(checksumLen);
PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
nextPacketSeqno, false, dataLen, syncBlock); nextPacketSeqno, false, dataLen, syncBlock);
int headerLen = header.getSerializedSize(); int headerLen = header.getSerializedSize();
ByteBuf headerBuf = alloc.buffer(headerLen); ByteBuf headerBuf = alloc.buffer(headerLen);
header.putInBuffer(headerBuf.nioBuffer(0, headerLen)); header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
headerBuf.writerIndex(headerLen); headerBuf.writerIndex(headerLen);
Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, Callback c =
datanodeInfoMap.keySet(), dataLen); new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen);
waitingAckQueue.addLast(c); waitingAckQueue.addLast(c);
// recheck again after we pushed the callback to queue // recheck again after we pushed the callback to queue
if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) { if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
@ -429,7 +432,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
return; return;
} }
// TODO: we should perhaps measure time taken per DN here; // TODO: we should perhaps measure time taken per DN here;
// we could collect statistics per DN, and/or exclude bad nodes in createOutput. // we could collect statistics per DN, and/or exclude bad nodes in createOutput.
datanodeInfoMap.keySet().forEach(ch -> { datanodeInfoMap.keySet().forEach(ch -> {
ch.write(headerBuf.retainedDuplicate()); ch.write(headerBuf.retainedDuplicate());
ch.write(checksumBuf.retainedDuplicate()); ch.write(checksumBuf.retainedDuplicate());
@ -514,7 +517,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
} }
trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum(); trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum();
ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen)) ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen))
.ensureWritable(trailingPartialChunkLength); .ensureWritable(trailingPartialChunkLength);
if (trailingPartialChunkLength != 0) { if (trailingPartialChunkLength != 0) {
buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf, buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf,
trailingPartialChunkLength); trailingPartialChunkLength);


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -116,7 +116,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
@InterfaceAudience.Private @InterfaceAudience.Private
public final class FanOutOneBlockAsyncDFSOutputHelper { public final class FanOutOneBlockAsyncDFSOutputHelper {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class); LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class);
private FanOutOneBlockAsyncDFSOutputHelper() { private FanOutOneBlockAsyncDFSOutputHelper() {
} }
@ -145,9 +145,8 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
// helper class for creating files. // helper class for creating files.
private interface FileCreator { private interface FileCreator {
default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked, default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication,
short replication, long blockSize, CryptoProtocolVersion[] supportedVersions) long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception {
throws Exception {
try { try {
return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent, return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent,
replication, blockSize, supportedVersions); replication, blockSize, supportedVersions);
@ -161,15 +160,15 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName, Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName,
EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions) throws Exception; CryptoProtocolVersion[] supportedVersions) throws Exception;
} }
private static final FileCreator FILE_CREATOR; private static final FileCreator FILE_CREATOR;
private static LeaseManager createLeaseManager() throws NoSuchMethodException { private static LeaseManager createLeaseManager() throws NoSuchMethodException {
Method beginFileLeaseMethod = Method beginFileLeaseMethod =
DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class); DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class);
beginFileLeaseMethod.setAccessible(true); beginFileLeaseMethod.setAccessible(true);
Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class); Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class);
endFileLeaseMethod.setAccessible(true); endFileLeaseMethod.setAccessible(true);
@ -197,13 +196,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
private static FileCreator createFileCreator3_3() throws NoSuchMethodException { private static FileCreator createFileCreator3_3() throws NoSuchMethodException {
Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class, Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class,
String.class, EnumSetWritable.class, boolean.class, short.class, long.class, String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
CryptoProtocolVersion[].class, String.class, String.class); CryptoProtocolVersion[].class, String.class, String.class);
return (instance, src, masked, clientName, flag, createParent, replication, blockSize, return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
supportedVersions) -> { supportedVersions) -> {
return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
createParent, replication, blockSize, supportedVersions, null, null); createParent, replication, blockSize, supportedVersions, null, null);
}; };
} }
@ -213,7 +212,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
CryptoProtocolVersion[].class, String.class); CryptoProtocolVersion[].class, String.class);
return (instance, src, masked, clientName, flag, createParent, replication, blockSize, return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
supportedVersions) -> { supportedVersions) -> {
return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
createParent, replication, blockSize, supportedVersions, null); createParent, replication, blockSize, supportedVersions, null);
}; };
@ -249,9 +248,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
LEASE_MANAGER = createLeaseManager(); LEASE_MANAGER = createLeaseManager();
FILE_CREATOR = createFileCreator(); FILE_CREATOR = createFileCreator();
} catch (Exception e) { } catch (Exception e) {
String msg = "Couldn't properly initialize access to HDFS internals. Please " + String msg = "Couldn't properly initialize access to HDFS internals. Please "
"update your WAL Provider to not make use of the 'asyncfs' provider. See " + + "update your WAL Provider to not make use of the 'asyncfs' provider. See "
"HBASE-16110 for more information."; + "HBASE-16110 for more information.";
LOG.error(msg, e); LOG.error(msg, e);
throw new Error(msg, e); throw new Error(msg, e);
} }
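The error message in this hunk points at HBASE-16110 and asks users to move the WAL Provider off 'asyncfs' when access to HDFS internals cannot be initialized. A hedged sketch of that override, done programmatically for illustration (it would normally live in hbase-site.xml), follows; the chosen provider value is an assumption about the deployment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class WalProviderOverride {
  private WalProviderOverride() {
  }

  // Usually configured in hbase-site.xml; shown programmatically only for illustration.
  static Configuration withFilesystemWalProvider() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem"); // avoid the 'asyncfs' provider
    return conf;
  }
}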
@ -282,7 +281,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo, private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo,
Promise<Channel> promise, int timeoutMs) { Promise<Channel> promise, int timeoutMs) {
channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS), channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
new ProtobufVarint32FrameDecoder(), new ProtobufVarint32FrameDecoder(),
new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()), new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
@ -290,7 +289,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
@Override @Override
protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp) protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
throws Exception { throws Exception {
Status pipelineStatus = resp.getStatus(); Status pipelineStatus = resp.getStatus();
if (PipelineAck.isRestartOOBStatus(pipelineStatus)) { if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
throw new IOException("datanode " + dnInfo + " is restarting"); throw new IOException("datanode " + dnInfo + " is restarting");
@ -298,11 +297,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink(); String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
if (resp.getStatus() != Status.SUCCESS) { if (resp.getStatus() != Status.SUCCESS) {
if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) { if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
throw new InvalidBlockTokenException("Got access token error" + ", status message " + throw new InvalidBlockTokenException("Got access token error" + ", status message "
resp.getMessage() + ", " + logInfo); + resp.getMessage() + ", " + logInfo);
} else { } else {
throw new IOException("Got error" + ", status=" + resp.getStatus().name() + throw new IOException("Got error" + ", status=" + resp.getStatus().name()
", status message " + resp.getMessage() + ", " + logInfo); + ", status message " + resp.getMessage() + ", " + logInfo);
} }
} }
// success // success
@ -329,7 +328,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) { if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) {
promise promise
.tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response")); .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
} else { } else {
super.userEventTriggered(ctx, evt); super.userEventTriggered(ctx, evt);
} }
@ -343,7 +342,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
private static void requestWriteBlock(Channel channel, StorageType storageType, private static void requestWriteBlock(Channel channel, StorageType storageType,
OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
OpWriteBlockProto proto = OpWriteBlockProto proto =
writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build(); writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build();
int protoLen = proto.getSerializedSize(); int protoLen = proto.getSerializedSize();
@ -356,9 +355,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo, private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs, StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise) DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
throws IOException { throws IOException {
Promise<Void> saslPromise = channel.eventLoop().newPromise(); Promise<Void> saslPromise = channel.eventLoop().newPromise();
trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise); trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
saslPromise.addListener(new FutureListener<Void>() { saslPromise.addListener(new FutureListener<Void>() {
@ -377,13 +376,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
Class<? extends Channel> channelClass) { Class<? extends Channel> channelClass) {
StorageType[] storageTypes = locatedBlock.getStorageTypes(); StorageType[] storageTypes = locatedBlock.getStorageTypes();
DatanodeInfo[] datanodeInfos = locatedBlock.getLocations(); DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
boolean connectToDnViaHostname = boolean connectToDnViaHostname =
conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT); int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
blockCopy.setNumBytes(locatedBlock.getBlockSize()); blockCopy.setNumBytes(locatedBlock.getBlockSize());
@ -392,11 +391,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
.setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
.setClientName(clientName).build(); .setClientName(clientName).build();
ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer); ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder() OpWriteBlockProto.Builder writeBlockProtoBuilder =
.setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())) OpWriteBlockProto.newBuilder().setHeader(header)
.setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()) .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
.setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS) .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
.setRequestedChecksum(checksumProto) .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
.setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build()); .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length); List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
for (int i = 0; i < datanodeInfos.length; i++) { for (int i = 0; i < datanodeInfos.length; i++) {
@ -406,26 +405,26 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
futureList.add(promise); futureList.add(promise);
String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname); String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
new Bootstrap().group(eventLoopGroup).channel(channelClass) new Bootstrap().group(eventLoopGroup).channel(channelClass)
.option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() { .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {
@Override @Override
protected void initChannel(Channel ch) throws Exception { protected void initChannel(Channel ch) throws Exception {
// we need to get the remote address of the channel so we can only move on after // we need to get the remote address of the channel so we can only move on after
// channel connected. Leave an empty implementation here because netty does not allow // channel connected. Leave an empty implementation here because netty does not allow
// a null handler. // a null handler.
} }
}).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() { }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {
@Override @Override
public void operationComplete(ChannelFuture future) throws Exception { public void operationComplete(ChannelFuture future) throws Exception {
if (future.isSuccess()) { if (future.isSuccess()) {
initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
timeoutMs, client, locatedBlock.getBlockToken(), promise); timeoutMs, client, locatedBlock.getBlockToken(), promise);
} else { } else {
promise.tryFailure(future.cause()); promise.tryFailure(future.cause());
}
} }
}); }
});
} }
return futureList; return futureList;
} }
@ -453,21 +452,21 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src, private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
boolean overwrite, boolean createParent, short replication, long blockSize, boolean overwrite, boolean createParent, short replication, long blockSize,
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
StreamSlowMonitor monitor) throws IOException { throws IOException {
Configuration conf = dfs.getConf(); Configuration conf = dfs.getConf();
DFSClient client = dfs.getClient(); DFSClient client = dfs.getClient();
String clientName = client.getClientName(); String clientName = client.getClientName();
ClientProtocol namenode = client.getNamenode(); ClientProtocol namenode = client.getNamenode();
int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, int createMaxRetries =
DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager(); ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
Set<DatanodeInfo> toExcludeNodes = Set<DatanodeInfo> toExcludeNodes =
new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
for (int retry = 0;; retry++) { for (int retry = 0;; retry++) {
LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src,
toExcludeNodes, retry); toExcludeNodes, retry);
HdfsFileStatus stat; HdfsFileStatus stat;
try { try {
stat = FILE_CREATOR.create(namenode, src, stat = FILE_CREATOR.create(namenode, src,
@ -556,14 +555,14 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
* inside an {@link EventLoop}. * inside an {@link EventLoop}.
*/ */
public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f, public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f,
boolean overwrite, boolean createParent, short replication, long blockSize, boolean overwrite, boolean createParent, short replication, long blockSize,
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
final StreamSlowMonitor monitor) throws IOException { final StreamSlowMonitor monitor) throws IOException {
return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() { return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() {
@Override @Override
public FanOutOneBlockAsyncDFSOutput doCall(Path p) public FanOutOneBlockAsyncDFSOutput doCall(Path p)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {
return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication, return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication,
blockSize, eventLoopGroup, channelClass, monitor); blockSize, eventLoopGroup, channelClass, monitor);
} }
@ -583,7 +582,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
} }
static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName, static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName,
ExtendedBlock block, long fileId) { ExtendedBlock block, long fileId) {
for (int retry = 0;; retry++) { for (int retry = 0;; retry++) {
try { try {
if (namenode.complete(src, clientName, block, fileId)) { if (namenode.complete(src, clientName, block, fileId)) {


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -104,7 +104,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
@InterfaceAudience.Private @InterfaceAudience.Private
public final class FanOutOneBlockAsyncDFSOutputSaslHelper { public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class); LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class);
private FanOutOneBlockAsyncDFSOutputSaslHelper() { private FanOutOneBlockAsyncDFSOutputSaslHelper() {
} }
@ -129,21 +129,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private interface TransparentCryptoHelper { private interface TransparentCryptoHelper {
Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client)
throws IOException; throws IOException;
} }
private static final TransparentCryptoHelper TRANSPARENT_CRYPTO_HELPER; private static final TransparentCryptoHelper TRANSPARENT_CRYPTO_HELPER;
private static SaslAdaptor createSaslAdaptor() private static SaslAdaptor createSaslAdaptor()
throws NoSuchFieldException, NoSuchMethodException { throws NoSuchFieldException, NoSuchMethodException {
Field saslPropsResolverField = Field saslPropsResolverField =
SaslDataTransferClient.class.getDeclaredField("saslPropsResolver"); SaslDataTransferClient.class.getDeclaredField("saslPropsResolver");
saslPropsResolverField.setAccessible(true); saslPropsResolverField.setAccessible(true);
Field trustedChannelResolverField = Field trustedChannelResolverField =
SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver"); SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver");
trustedChannelResolverField.setAccessible(true); trustedChannelResolverField.setAccessible(true);
Field fallbackToSimpleAuthField = Field fallbackToSimpleAuthField =
SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth"); SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth");
fallbackToSimpleAuthField.setAccessible(true); fallbackToSimpleAuthField.setAccessible(true);
return new SaslAdaptor() { return new SaslAdaptor() {
@ -177,7 +177,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396() private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396()
throws NoSuchMethodException { throws NoSuchMethodException {
Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class
.getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
decryptEncryptedDataEncryptionKeyMethod.setAccessible(true); decryptEncryptedDataEncryptionKeyMethod.setAccessible(true);
@ -185,7 +185,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
@Override @Override
public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
DFSClient client) throws IOException { DFSClient client) throws IOException {
try { try {
KeyVersion decryptedKey = KeyVersion decryptedKey =
(KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo);
@ -206,7 +206,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS12396() private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS12396()
throws ClassNotFoundException, NoSuchMethodException { throws ClassNotFoundException, NoSuchMethodException {
Class<?> hdfsKMSUtilCls = Class.forName("org.apache.hadoop.hdfs.HdfsKMSUtil"); Class<?> hdfsKMSUtilCls = Class.forName("org.apache.hadoop.hdfs.HdfsKMSUtil");
Method decryptEncryptedDataEncryptionKeyMethod = hdfsKMSUtilCls.getDeclaredMethod( Method decryptEncryptedDataEncryptionKeyMethod = hdfsKMSUtilCls.getDeclaredMethod(
"decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class, KeyProvider.class); "decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class, KeyProvider.class);
@ -215,7 +215,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
@Override @Override
public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
DFSClient client) throws IOException { DFSClient client) throws IOException {
try { try {
KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod
.invoke(null, feInfo, client.getKeyProvider()); .invoke(null, feInfo, client.getKeyProvider());
@ -236,12 +236,12 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
private static TransparentCryptoHelper createTransparentCryptoHelper() private static TransparentCryptoHelper createTransparentCryptoHelper()
throws NoSuchMethodException, ClassNotFoundException { throws NoSuchMethodException, ClassNotFoundException {
try { try {
return createTransparentCryptoHelperWithoutHDFS12396(); return createTransparentCryptoHelperWithoutHDFS12396();
} catch (NoSuchMethodException e) { } catch (NoSuchMethodException e) {
LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient,"
" should be hadoop version with HDFS-12396", e); + " should be hadoop version with HDFS-12396", e);
} }
return createTransparentCryptoHelperWithHDFS12396(); return createTransparentCryptoHelperWithHDFS12396();
} }
@ -252,8 +252,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
TRANSPARENT_CRYPTO_HELPER = createTransparentCryptoHelper(); TRANSPARENT_CRYPTO_HELPER = createTransparentCryptoHelper();
} catch (Exception e) { } catch (Exception e) {
String msg = "Couldn't properly initialize access to HDFS internals. Please " String msg = "Couldn't properly initialize access to HDFS internals. Please "
+ "update your WAL Provider to not make use of the 'asyncfs' provider. See " + "update your WAL Provider to not make use of the 'asyncfs' provider. See "
+ "HBASE-16110 for more information."; + "HBASE-16110 for more information.";
LOG.error(msg, e); LOG.error(msg, e);
throw new Error(msg, e); throw new Error(msg, e);
} }
@ -324,8 +324,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private int step = 0; private int step = 0;
public SaslNegotiateHandler(Configuration conf, String username, char[] password, public SaslNegotiateHandler(Configuration conf, String username, char[] password,
Map<String, String> saslProps, int timeoutMs, Promise<Void> promise, Map<String, String> saslProps, int timeoutMs, Promise<Void> promise, DFSClient dfsClient)
DFSClient dfsClient) throws SaslException { throws SaslException {
this.conf = conf; this.conf = conf;
this.saslProps = saslProps; this.saslProps = saslProps;
this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL, this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL,
@ -355,8 +355,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
/** /**
* The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After
* After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. * Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
* Use Reflection to check which ones to use. * Use Reflection to check which ones to use.
*/ */
private static class BuilderPayloadSetter { private static class BuilderPayloadSetter {
@ -366,13 +366,11 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
/** /**
* Create a ByteString from byte array without copying (wrap), and then set it as the payload * Create a ByteString from byte array without copying (wrap), and then set it as the payload
* for the builder. * for the builder.
*
* @param builder builder for HDFS DataTransferEncryptorMessage. * @param builder builder for HDFS DataTransferEncryptorMessage.
* @param payload byte array of payload. * @param payload byte array of payload. n
* @throws IOException
*/ */
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload) static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
throws IOException { byte[] payload) throws IOException {
Object byteStringObject; Object byteStringObject;
try { try {
// byteStringObject = new LiteralByteString(payload); // byteStringObject = new LiteralByteString(payload);
@ -396,18 +394,18 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
try { try {
// See if it can load the relocated ByteString, which comes from hadoop-thirdparty. // See if it can load the relocated ByteString, which comes from hadoop-thirdparty.
byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString"); byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + LOG.debug("Found relocated ByteString class from hadoop-thirdparty."
" Assuming this is Hadoop 3.3.0+."); + " Assuming this is Hadoop 3.3.0+.");
} catch (ClassNotFoundException e) { } catch (ClassNotFoundException e) {
LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty."
" Assuming this is below Hadoop 3.3.0", e); + " Assuming this is below Hadoop 3.3.0", e);
} }
// LiteralByteString is a package private class in protobuf. Make it accessible. // LiteralByteString is a package private class in protobuf. Make it accessible.
Class<?> literalByteStringClass; Class<?> literalByteStringClass;
try { try {
literalByteStringClass = Class.forName( literalByteStringClass =
"org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found."); LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found.");
} catch (ClassNotFoundException e) { } catch (ClassNotFoundException e) {
try { try {
@ -435,9 +433,9 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload, private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload,
List<CipherOption> options) throws IOException { List<CipherOption> options) throws IOException {
DataTransferEncryptorMessageProto.Builder builder = DataTransferEncryptorMessageProto.Builder builder =
DataTransferEncryptorMessageProto.newBuilder(); DataTransferEncryptorMessageProto.newBuilder();
builder.setStatus(DataTransferEncryptorStatus.SUCCESS); builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
if (payload != null) { if (payload != null) {
BuilderPayloadSetter.wrapAndSetPayload(builder, payload); BuilderPayloadSetter.wrapAndSetPayload(builder, payload);
@ -486,7 +484,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private boolean requestedQopContainsPrivacy() { private boolean requestedQopContainsPrivacy() {
Set<String> requestedQop = Set<String> requestedQop =
ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
return requestedQop.contains("auth-conf"); return requestedQop.contains("auth-conf");
} }
@ -495,15 +493,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
throw new IOException("Failed to complete SASL handshake"); throw new IOException("Failed to complete SASL handshake");
} }
Set<String> requestedQop = Set<String> requestedQop =
ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(","))); ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
String negotiatedQop = getNegotiatedQop(); String negotiatedQop = getNegotiatedQop();
LOG.debug( LOG.debug(
"Verifying QOP, requested QOP = " + requestedQop + ", negotiated QOP = " + negotiatedQop); "Verifying QOP, requested QOP = " + requestedQop + ", negotiated QOP = " + negotiatedQop);
if (!requestedQop.contains(negotiatedQop)) { if (!requestedQop.contains(negotiatedQop)) {
throw new IOException(String.format("SASL handshake completed, but " throw new IOException(String.format("SASL handshake completed, but "
+ "channel does not have acceptable quality of protection, " + "channel does not have acceptable quality of protection, "
+ "requested = %s, negotiated = %s", + "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
requestedQop, negotiatedQop));
} }
} }
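
The checkSaslComplete hunk above splits the requested QOP list on commas and fails the handshake when the negotiated QOP is not among the requested values. A standalone sketch of that containment check with plain JDK collections; the class and method names are made up for illustration:

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class QopCheckSketch {

  /** Fails when the negotiated QOP is not among the requested values, mirroring the check above. */
  static void verifyQop(String requestedQopCsv, String negotiatedQop) throws IOException {
    Set<String> requested = new HashSet<>(Arrays.asList(requestedQopCsv.split(",")));
    if (!requested.contains(negotiatedQop)) {
      throw new IOException(String.format(
        "SASL handshake completed, but quality of protection is unacceptable: "
          + "requested = %s, negotiated = %s", requested, negotiatedQop));
    }
  }

  public static void main(String[] args) throws IOException {
    verifyQop("auth,auth-int,auth-conf", "auth-conf"); // passes
    verifyQop("auth-conf", "auth");                    // throws IOException
  }
}
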
@ -522,13 +519,13 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
outKey = saslClient.unwrap(outKey, 0, outKey.length); outKey = saslClient.unwrap(outKey, 0, outKey.length);
} }
return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey, return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey,
option.getOutIv()); option.getOutIv());
} }
private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto, private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto,
boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException { boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException {
List<CipherOption> cipherOptions = List<CipherOption> cipherOptions =
PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList()); PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList());
if (cipherOptions == null || cipherOptions.isEmpty()) { if (cipherOptions == null || cipherOptions.isEmpty()) {
return null; return null;
} }
@ -558,7 +555,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
assert response == null; assert response == null;
checkSaslComplete(); checkSaslComplete();
CipherOption cipherOption = CipherOption cipherOption =
getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient); getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient);
ChannelPipeline p = ctx.pipeline(); ChannelPipeline p = ctx.pipeline();
while (p.first() != null) { while (p.first() != null) {
p.removeFirst(); p.removeFirst();
@ -639,7 +636,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
@Override @Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
throws Exception { throws Exception {
if (msg instanceof ByteBuf) { if (msg instanceof ByteBuf) {
ByteBuf buf = (ByteBuf) msg; ByteBuf buf = (ByteBuf) msg;
cBuf.addComponent(buf); cBuf.addComponent(buf);
@ -676,7 +673,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private final Decryptor decryptor; private final Decryptor decryptor;
public DecryptHandler(CryptoCodec codec, byte[] key, byte[] iv) public DecryptHandler(CryptoCodec codec, byte[] key, byte[] iv)
throws GeneralSecurityException, IOException { throws GeneralSecurityException, IOException {
this.decryptor = codec.createDecryptor(); this.decryptor = codec.createDecryptor();
this.decryptor.init(key, Arrays.copyOf(iv, iv.length)); this.decryptor.init(key, Arrays.copyOf(iv, iv.length));
} }
@ -709,14 +706,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private final Encryptor encryptor; private final Encryptor encryptor;
public EncryptHandler(CryptoCodec codec, byte[] key, byte[] iv) public EncryptHandler(CryptoCodec codec, byte[] key, byte[] iv)
throws GeneralSecurityException, IOException { throws GeneralSecurityException, IOException {
this.encryptor = codec.createEncryptor(); this.encryptor = codec.createEncryptor();
this.encryptor.init(key, Arrays.copyOf(iv, iv.length)); this.encryptor.init(key, Arrays.copyOf(iv, iv.length));
} }
@Override @Override
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect)
throws Exception { throws Exception {
if (preferDirect) { if (preferDirect) {
return ctx.alloc().directBuffer(msg.readableBytes()); return ctx.alloc().directBuffer(msg.readableBytes());
} else { } else {
@ -747,7 +744,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
private static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) { private static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) {
return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER
+ Base64.getEncoder().encodeToString(encryptionKey.nonce); + Base64.getEncoder().encodeToString(encryptionKey.nonce);
} }
private static char[] encryptionKeyToPassword(byte[] encryptionKey) { private static char[] encryptionKeyToPassword(byte[] encryptionKey) {
@ -771,26 +768,26 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs, private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs,
String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise, String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise,
DFSClient dfsClient) { DFSClient dfsClient) {
try { try {
channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS), channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
new ProtobufVarint32FrameDecoder(), new ProtobufVarint32FrameDecoder(),
new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()), new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()),
new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise, new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise,
dfsClient)); dfsClient));
} catch (SaslException e) { } catch (SaslException e) {
saslPromise.tryFailure(e); saslPromise.tryFailure(e);
} }
} }
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo, static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken, int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
Promise<Void> saslPromise) throws IOException { Promise<Void> saslPromise) throws IOException {
SaslDataTransferClient saslClient = client.getSaslDataTransferClient(); SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient); SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
TrustedChannelResolver trustedChannelResolver = TrustedChannelResolver trustedChannelResolver =
SASL_ADAPTOR.getTrustedChannelResolver(saslClient); SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient); AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress(); InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) { if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
@ -805,24 +802,23 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey), doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
encryptionKeyToPassword(encryptionKey.encryptionKey), encryptionKeyToPassword(encryptionKey.encryptionKey),
createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client);
client);
} else if (!UserGroupInformation.isSecurityEnabled()) { } else if (!UserGroupInformation.isSecurityEnabled()) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
+ ", datanodeId = " + dnInfo); + ", datanodeId = " + dnInfo);
} }
saslPromise.trySuccess(null); saslPromise.trySuccess(null);
} else if (dnInfo.getXferPort() < 1024) { } else if (dnInfo.getXferPort() < 1024) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("SASL client skipping handshake in secured configuration with " LOG.debug("SASL client skipping handshake in secured configuration with "
+ "privileged port for addr = " + addr + ", datanodeId = " + dnInfo); + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
} }
saslPromise.trySuccess(null); saslPromise.trySuccess(null);
} else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) { } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("SASL client skipping handshake in secured configuration with " LOG.debug("SASL client skipping handshake in secured configuration with "
+ "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo); + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
} }
saslPromise.trySuccess(null); saslPromise.trySuccess(null);
} else if (saslPropsResolver != null) { } else if (saslPropsResolver != null) {
@ -832,21 +828,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
} }
doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken), doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise, buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise,
client); client);
} else { } else {
// It's a secured cluster using non-privileged ports, but no SASL. The only way this can // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
// happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
// edge case. // edge case.
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("SASL client skipping handshake in secured configuration with no SASL " LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
+ "protection configured for addr = " + addr + ", datanodeId = " + dnInfo); + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
} }
saslPromise.trySuccess(null); saslPromise.trySuccess(null);
} }
} }
static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client) static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client)
throws IOException { throws IOException {
FileEncryptionInfo feInfo = stat.getFileEncryptionInfo(); FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
if (feInfo == null) { if (feInfo == null) {
return null; return null;
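
The trySaslNegotiate hunks above walk a decision ladder: skip the handshake for trusted channels, negotiate with credentials derived from a data encryption key when one is available, skip for unsecured clusters, privileged datanode transfer ports, or a simple-auth fallback, negotiate with block-token credentials when a SaslPropertiesResolver is configured, and otherwise skip. A condensed sketch of that ordering with plain booleans; all names are illustrative, and the ordering is inferred from the hunks shown here rather than the full method:

final class SaslDecisionSketch {

  enum Decision { SKIP, NEGOTIATE_WITH_ENCRYPTION_KEY, NEGOTIATE_WITH_BLOCK_TOKEN }

  /** Condenses the branch ordering of the hunks above into one method; inputs are plain flags. */
  static Decision decide(boolean trustedChannel, boolean hasEncryptionKey, boolean securityEnabled,
      int xferPort, boolean fallbackToSimpleAuth, boolean hasSaslPropsResolver) {
    if (trustedChannel) {
      return Decision.SKIP;                          // trusted channel resolver: no SASL required
    }
    if (hasEncryptionKey) {
      return Decision.NEGOTIATE_WITH_ENCRYPTION_KEY; // username built from keyId/blockPoolId/nonce
    }
    if (!securityEnabled) {
      return Decision.SKIP;                          // unsecured configuration
    }
    if (xferPort < 1024) {
      return Decision.SKIP;                          // privileged port implies a secured datanode
    }
    if (fallbackToSimpleAuth) {
      return Decision.SKIP;                          // secured client talking to an unsecured cluster
    }
    if (hasSaslPropsResolver) {
      return Decision.NEGOTIATE_WITH_BLOCK_TOKEN;    // username/password derived from the block token
    }
    return Decision.SKIP;                            // ignore.secure.ports.for.testing edge case
  }

  public static void main(String[] args) {
    System.out.println(decide(false, false, true, 50010, false, true)); // NEGOTIATE_WITH_BLOCK_TOKEN
  }
}
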


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -17,33 +17,29 @@
*/ */
package org.apache.hadoop.hbase.io.asyncfs; package org.apache.hadoop.hbase.io.asyncfs;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder;
import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil; import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
/** /**
* Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode
* The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf). * supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates
* * protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect
* Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and * which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages.
* so we must use reflection to detect which one (relocated or not) to use. * This is meant to process the protobuf messages in HDFS for the asyncfs use case.
* */
* Do not use this to process HBase's shaded protobuf messages. This is meant to process the
* protobuf messages in HDFS for the asyncfs use case.
* */
@InterfaceAudience.Private @InterfaceAudience.Private
public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> { public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
private static final Logger LOG = private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class);
LoggerFactory.getLogger(ProtobufDecoder.class);
private static Class<?> protobufMessageLiteClass = null; private static Class<?> protobufMessageLiteClass = null;
private static Class<?> protobufMessageLiteBuilderClass = null; private static Class<?> protobufMessageLiteBuilderClass = null;
@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
private Object parser; private Object parser;
private Object builder; private Object builder;
public ProtobufDecoder(Object prototype) { public ProtobufDecoder(Object prototype) {
try { try {
Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod( Method getDefaultInstanceForTypeMethod =
"getDefaultInstanceForType"); protobufMessageLiteClass.getMethod("getDefaultInstanceForType");
Object prototype1 = getDefaultInstanceForTypeMethod Object prototype1 =
.invoke(ObjectUtil.checkNotNull(prototype, "prototype")); getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));
// parser = prototype.getParserForType() // parser = prototype.getParserForType()
parser = getParserForTypeMethod.invoke(prototype1); parser = getParserForTypeMethod.invoke(prototype1);
parseFromMethod = parser.getClass().getMethod( parseFromMethod =
"parseFrom", byte[].class, int.class, int.class); parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class);
// builder = prototype.newBuilderForType(); // builder = prototype.newBuilderForType();
builder = newBuilderForTypeMethod.invoke(prototype1); builder = newBuilderForTypeMethod.invoke(prototype1);
mergeFromMethod = builder.getClass().getMethod( mergeFromMethod =
"mergeFrom", byte[].class, int.class, int.class); builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class);
// All protobuf message builders inherits from MessageLite.Builder // All protobuf message builders inherits from MessageLite.Builder
buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build"); buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build");
@ -88,8 +83,7 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
} }
} }
protected void decode( protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
int length = msg.readableBytes(); int length = msg.readableBytes();
byte[] array; byte[] array;
int offset; int offset;
@ -122,8 +116,8 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
try { try {
protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite"); protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
protobufMessageLiteBuilderClass = Class.forName( protobufMessageLiteBuilderClass =
"org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
LOG.debug("Hadoop 3.3 and above shades protobuf."); LOG.debug("Hadoop 3.3 and above shades protobuf.");
} catch (ClassNotFoundException e) { } catch (ClassNotFoundException e) {
LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e); LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e);


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -22,7 +22,6 @@ import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@ -50,7 +49,7 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
public WrapperAsyncFSOutput(Path file, FSDataOutputStream out) { public WrapperAsyncFSOutput(Path file, FSDataOutputStream out) {
this.out = out; this.out = out;
this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true) this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build()); .setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build());
} }
@Override @Override
@ -95,8 +94,8 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
} }
long pos = out.getPos(); long pos = out.getPos();
/** /**
* This flush0 method could only be called by single thread, so here we could * This flush0 method could only be called by single thread, so here we could safely overwrite
* safely overwrite without any synchronization. * without any synchronization.
*/ */
this.syncedLength = pos; this.syncedLength = pos;
future.complete(pos); future.complete(pos);


@ -56,24 +56,23 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
private final int maxExcludeDNCount; private final int maxExcludeDNCount;
private final Configuration conf; private final Configuration conf;
// This is a map of providerId->StreamSlowMonitor // This is a map of providerId->StreamSlowMonitor
private final Map<String, StreamSlowMonitor> streamSlowMonitors = private final Map<String, StreamSlowMonitor> streamSlowMonitors = new ConcurrentHashMap<>(1);
new ConcurrentHashMap<>(1);
public ExcludeDatanodeManager(Configuration conf) { public ExcludeDatanodeManager(Configuration conf) {
this.conf = conf; this.conf = conf;
this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT); DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT);
this.excludeDNsCache = CacheBuilder.newBuilder() this.excludeDNsCache = CacheBuilder.newBuilder()
.expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, .expireAfterWrite(
DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
.maximumSize(this.maxExcludeDNCount) TimeUnit.HOURS)
.build(); .maximumSize(this.maxExcludeDNCount).build();
} }
/** /**
* Try to add a datanode to the regionserver excluding cache * Try to add a datanode to the regionserver excluding cache
* @param datanodeInfo the datanode to be added to the excluded cache * @param datanodeInfo the datanode to be added to the excluded cache
* @param cause the cause that the datanode is hope to be excluded * @param cause the cause that the datanode is hope to be excluded
* @return True if the datanode is added to the regionserver excluding cache, false otherwise * @return True if the datanode is added to the regionserver excluding cache, false otherwise
*/ */
public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) { public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) {
@ -85,15 +84,15 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
datanodeInfo, cause, excludeDNsCache.size()); datanodeInfo, cause, excludeDNsCache.size());
return true; return true;
} }
LOG.debug("Try add datanode {} to exclude cache by [{}] failed, " LOG.debug(
+ "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet()); "Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}",
datanodeInfo, cause, getExcludeDNs().keySet());
return false; return false;
} }
public StreamSlowMonitor getStreamSlowMonitor(String name) { public StreamSlowMonitor getStreamSlowMonitor(String name) {
String key = name == null || name.isEmpty() ? "defaultMonitorName" : name; String key = name == null || name.isEmpty() ? "defaultMonitorName" : name;
return streamSlowMonitors return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
} }
public Map<DatanodeInfo, Long> getExcludeDNs() { public Map<DatanodeInfo, Long> getExcludeDNs() {
@ -105,10 +104,12 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
for (StreamSlowMonitor monitor : streamSlowMonitors.values()) { for (StreamSlowMonitor monitor : streamSlowMonitors.values()) {
monitor.onConfigurationChange(conf); monitor.onConfigurationChange(conf);
} }
this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite( this.excludeDNsCache = CacheBuilder.newBuilder()
this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), .expireAfterWrite(
TimeUnit.HOURS).maximumSize(this.conf this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) TimeUnit.HOURS)
.maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
.build(); .build();
} }
} }
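
The two hunks above build the exclude-datanode cache with a time-to-live (expireAfterWrite, in hours) and a size cap (maximumSize), and rebuild it with fresh values when the configuration changes. A small sketch of that cache shape using plain Guava; HBase actually goes through its relocated hbase-thirdparty copy of CacheBuilder, and the string keys here are a simplification of DatanodeInfo:

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

final class ExcludeCacheSketch {

  /** Builds a bounded, time-expiring cache, mirroring the TTL-plus-size-cap shape above. */
  static Cache<String, Long> buildExcludeCache(long ttlHours, long maxEntries) {
    return CacheBuilder.newBuilder()
        .expireAfterWrite(ttlHours, TimeUnit.HOURS)
        .maximumSize(maxEntries)
        .build();
  }

  public static void main(String[] args) {
    Cache<String, Long> excluded = buildExcludeCache(6, 3);
    excluded.put("datanode-1", System.currentTimeMillis());
    // Entries age out after the TTL, or get evicted once the size cap is exceeded.
    System.out.println(excluded.asMap().keySet());
  }
}
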


@ -38,18 +38,16 @@ import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
/** /**
* Class for monitor the wal file flush performance. * Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor.
* Each active wal file has a StreamSlowMonitor.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class StreamSlowMonitor implements ConfigurationObserver { public class StreamSlowMonitor implements ConfigurationObserver {
private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class); private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class);
/** /**
* Configure for the min count for a datanode detected slow. * Configure for the min count for a datanode detected slow. If a datanode is detected slow times
* If a datanode is detected slow times up to this count, then it will be added to the exclude * up to this count, then it will be added to the exclude datanode cache by
* datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} * {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever.
* of this regionsever.
*/ */
private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY = private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY =
"hbase.regionserver.async.wal.min.slow.detect.count"; "hbase.regionserver.async.wal.min.slow.detect.count";
@ -63,9 +61,9 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms
/** /**
* Configure for the speed check of packet min length. * Configure for the speed check of packet min length. For packets whose data length smaller than
* For packets whose data length smaller than this value, check slow by processing time. * this value, check slow by processing time. While for packets whose data length larger than this
* While for packets whose data length larger than this value, check slow by flushing speed. * value, check slow by flushing speed.
*/ */
private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY = private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY =
"hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min";
@ -73,8 +71,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024; private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024;
/** /**
* Configure for the slow packet process time, a duration from send to ACK. * Configure for the slow packet process time, a duration from send to ACK. The processing time
* The processing time check is for packets that data length smaller than * check is for packets that data length smaller than
* {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY} * {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY}
*/ */
public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY = public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY =
@ -105,15 +103,16 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private long minLengthForSpeedCheck; private long minLengthForSpeedCheck;
public StreamSlowMonitor(Configuration conf, String name, public StreamSlowMonitor(Configuration conf, String name,
ExcludeDatanodeManager excludeDatanodeManager) { ExcludeDatanodeManager excludeDatanodeManager) {
setConf(conf); setConf(conf);
this.name = name; this.name = name;
this.excludeDatanodeManager = excludeDatanodeManager; this.excludeDatanodeManager = excludeDatanodeManager;
this.datanodeSlowDataQueue = CacheBuilder.newBuilder() this.datanodeSlowDataQueue = CacheBuilder.newBuilder()
.maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
.expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, .expireAfterWrite(
DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
TimeUnit.HOURS)
.build(new CacheLoader<DatanodeInfo, Deque<PacketAckData>>() { .build(new CacheLoader<DatanodeInfo, Deque<PacketAckData>>() {
@Override @Override
public Deque<PacketAckData> load(DatanodeInfo key) throws Exception { public Deque<PacketAckData> load(DatanodeInfo key) throws Exception {
@ -129,30 +128,33 @@ public class StreamSlowMonitor implements ConfigurationObserver {
/** /**
* Check if the packet process time shows that the relevant datanode is a slow node. * Check if the packet process time shows that the relevant datanode is a slow node.
* @param datanodeInfo the datanode that processed the packet * @param datanodeInfo the datanode that processed the packet
* @param packetDataLen the data length of the packet (in bytes) * @param packetDataLen the data length of the packet (in bytes)
* @param processTimeMs the process time (in ms) of the packet on the datanode, * @param processTimeMs the process time (in ms) of the packet on the datanode,
* @param lastAckTimestamp the last acked timestamp of the packet on another datanode * @param lastAckTimestamp the last acked timestamp of the packet on another datanode
* @param unfinished if the packet is unfinished flushed to the datanode replicas * @param unfinished if the packet is unfinished flushed to the datanode replicas
*/ */
public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen, public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen,
long processTimeMs, long lastAckTimestamp, int unfinished) { long processTimeMs, long lastAckTimestamp, int unfinished) {
long current = EnvironmentEdgeManager.currentTime(); long current = EnvironmentEdgeManager.currentTime();
// Here are two conditions used to determine whether a datanode is slow, // Here are two conditions used to determine whether a datanode is slow,
// 1. For small packet, we just have a simple time limit, without considering // 1. For small packet, we just have a simple time limit, without considering
// the size of the packet. // the size of the packet.
// 2. For large packet, we will calculate the speed, and check if the speed is too slow. // 2. For large packet, we will calculate the speed, and check if the speed is too slow.
boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || ( boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
packetDataLen > minLengthForSpeedCheck || (packetDataLen > minLengthForSpeedCheck
&& (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
if (slow) { if (slow) {
// Check if large diff ack timestamp between replicas, // Check if large diff ack timestamp between replicas,
// should try to avoid misjudgments that caused by GC STW. // should try to avoid misjudgments that caused by GC STW.
if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || ( if (
lastAckTimestamp <= 0 && unfinished == 0)) { (lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2)
LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " || (lastAckTimestamp <= 0 && unfinished == 0)
+ "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs, ) {
unfinished, lastAckTimestamp, this.name); LOG.info(
"Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
+ "lastAckTimestamp={}, monitor name: {}",
datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name);
if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) { if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) {
excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack"); excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack");
} }
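
The hunk above classifies a packet as slow in two ways: packets at or below the minimum length for the speed check are judged purely by processing time, larger packets by effective flush speed, with an extra guard on the last ACK timestamp so a GC pause elsewhere is not blamed on this datanode. A standalone sketch of just the two-way classification; the constant names and values are illustrative, not the HBase defaults read from the hbase.regionserver.async.wal.* keys:

final class SlowPacketCheckSketch {

  static final long MIN_LENGTH_FOR_SPEED_CHECK_BYTES = 64 * 1024;
  static final long SLOW_PACKET_ACK_MS = 6000;
  static final double MIN_FLUSH_SPEED_BYTES_PER_MS = 20.0; // bytes per millisecond, roughly KB/s

  /** Small packets are judged by ACK latency, large packets by bytes flushed per millisecond. */
  static boolean isSlow(long packetDataLen, long processTimeMs) {
    if (packetDataLen <= MIN_LENGTH_FOR_SPEED_CHECK_BYTES) {
      return processTimeMs > SLOW_PACKET_ACK_MS;
    }
    return (double) packetDataLen / processTimeMs < MIN_FLUSH_SPEED_BYTES_PER_MS;
  }

  public static void main(String[] args) {
    System.out.println(isSlow(4 * 1024, 7000));       // true: small packet, very slow ACK
    System.out.println(isSlow(1024 * 1024, 100_000)); // true: large packet, low flush speed
    System.out.println(isSlow(1024 * 1024, 10));      // false: large packet flushed quickly
  }
}
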
@ -168,8 +170,10 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) { private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) {
Deque<PacketAckData> slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo); Deque<PacketAckData> slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo);
long current = EnvironmentEdgeManager.currentTime(); long current = EnvironmentEdgeManager.currentTime();
while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl while (
|| slowDNQueue.size() >= minSlowDetectCount)) { !slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
|| slowDNQueue.size() >= minSlowDetectCount)
) {
slowDNQueue.removeFirst(); slowDNQueue.removeFirst();
} }
slowDNQueue.addLast(new PacketAckData(dataLength, processTime)); slowDNQueue.addLast(new PacketAckData(dataLength, processTime));
@ -177,13 +181,13 @@ public class StreamSlowMonitor implements ConfigurationObserver {
} }
private void setConf(Configuration conf) { private void setConf(Configuration conf) {
this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, this.minSlowDetectCount =
DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL); this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL);
this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY, this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY,
DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME);
this.minLengthForSpeedCheck = conf.getLong( this.minLengthForSpeedCheck =
DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH);
this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY, this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY,
DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED); DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED);


@ -1,5 +1,4 @@
/* /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -21,8 +20,8 @@ package org.apache.hadoop.hbase.util;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
/** /**
* Similar interface as {@link org.apache.hadoop.util.Progressable} but returns * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support
* a boolean to support canceling the operation. * canceling the operation.
* <p/> * <p/>
* Used for doing updating of OPENING znode during log replay on region open. * Used for doing updating of OPENING znode during log replay on region open.
*/ */
@ -30,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface CancelableProgressable { public interface CancelableProgressable {
/** /**
* Report progress. Returns true if operations should continue, false if the * Report progress. Returns true if operations should continue, false if the operation should be
* operation should be canceled and rolled back. * canceled and rolled back.
* @return whether to continue (true) or cancel (false) the operation * @return whether to continue (true) or cancel (false) the operation
*/ */
boolean progress(); boolean progress();
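
CancelableProgressable, shown above, is a single-method callback whose boolean return tells a long-running operation whether to keep going or roll back. Because it has one abstract method, a lambda suffices; the deadline-based implementation below is only an illustration of how a caller might supply one, assuming an HBase module providing the interface is on the classpath:

import org.apache.hadoop.hbase.util.CancelableProgressable;

final class DeadlineProgressSketch {

  /** Reports progress until a wall-clock deadline passes, then asks the operation to cancel. */
  static CancelableProgressable withDeadline(long deadlineMillis) {
    return () -> System.currentTimeMillis() < deadlineMillis;
  }

  public static void main(String[] args) {
    CancelableProgressable reporter = withDeadline(System.currentTimeMillis() + 30_000);
    // A long-running operation polls this periodically and rolls back once it returns false.
    System.out.println("continue? " + reporter.progress());
  }
}
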


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -120,8 +120,10 @@ public final class RecoverLeaseFSUtils {
// Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
// isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though. // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
long localStartWaiting = EnvironmentEdgeManager.currentTime(); long localStartWaiting = EnvironmentEdgeManager.currentTime();
while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase * while (
nbAttempt) { (EnvironmentEdgeManager.currentTime() - localStartWaiting)
< subsequentPauseBase * nbAttempt
) {
Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
if (findIsFileClosedMeth) { if (findIsFileClosedMeth) {
try { try {
@ -152,10 +154,10 @@ public final class RecoverLeaseFSUtils {
private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout,
final int nbAttempt, final Path p, final long startWaiting) { final int nbAttempt, final Path p, final long startWaiting) {
if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) {
LOG.warn("Cannot recoverLease after trying for " + LOG.warn("Cannot recoverLease after trying for "
conf.getInt("hbase.lease.recovery.timeout", 900000) + + conf.getInt("hbase.lease.recovery.timeout", 900000)
"ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; "
getLogMessageDetail(nbAttempt, p, startWaiting)); + getLogMessageDetail(nbAttempt, p, startWaiting));
return true; return true;
} }
return false; return false;
@ -170,8 +172,8 @@ public final class RecoverLeaseFSUtils {
boolean recovered = false; boolean recovered = false;
try { try {
recovered = dfs.recoverLease(p); recovered = dfs.recoverLease(p);
LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ")
getLogMessageDetail(nbAttempt, p, startWaiting)); + getLogMessageDetail(nbAttempt, p, startWaiting));
} catch (IOException e) { } catch (IOException e) {
if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
// This exception comes out instead of FNFE, fix it // This exception comes out instead of FNFE, fix it
@ -189,8 +191,8 @@ public final class RecoverLeaseFSUtils {
*/ */
private static String getLogMessageDetail(final int nbAttempt, final Path p, private static String getLogMessageDetail(final int nbAttempt, final Path p,
final long startWaiting) { final long startWaiting) {
return "attempt=" + nbAttempt + " on file=" + p + " after " + return "attempt=" + nbAttempt + " on file=" + p + " after "
(EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
} }
/** /**


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information


@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.asyncfs;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
@ -44,19 +45,15 @@ public class TestExcludeDatanodeManager {
StreamSlowMonitor streamSlowDNsMonitor = StreamSlowMonitor streamSlowDNsMonitor =
excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
DatanodeInfo datanodeInfo = DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
.setIpcPort(444).setNetworkLocation("location1").build(); streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
streamSlowDNsMonitor System.currentTimeMillis() - 5100, 0);
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
System.currentTimeMillis() - 5100, 0); System.currentTimeMillis() - 5100, 0);
streamSlowDNsMonitor streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, System.currentTimeMillis() - 5100, 0);
System.currentTimeMillis() - 5100, 0);
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
System.currentTimeMillis() - 5100, 0);
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
} }
@ -68,19 +65,15 @@ public class TestExcludeDatanodeManager {
StreamSlowMonitor streamSlowDNsMonitor = StreamSlowMonitor streamSlowDNsMonitor =
excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
DatanodeInfo datanodeInfo = DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
.setIpcPort(444).setNetworkLocation("location1").build(); streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
streamSlowDNsMonitor System.currentTimeMillis() - 7000, 0);
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
System.currentTimeMillis() - 7000, 0); System.currentTimeMillis() - 7000, 0);
streamSlowDNsMonitor streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, System.currentTimeMillis() - 7000, 0);
System.currentTimeMillis() - 7000, 0);
streamSlowDNsMonitor
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
System.currentTimeMillis() - 7000, 0);
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
} }


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -57,6 +57,7 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName; import org.junit.rules.TestName;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
@ -240,9 +241,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
StreamSlowMonitor streamSlowDNsMonitor = StreamSlowMonitor streamSlowDNsMonitor =
excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, try (FanOutOneBlockAsyncDFSOutput output =
f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3,
CHANNEL_CLASS, streamSlowDNsMonitor)) { FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) {
// should exclude the dead dn when retry so here we only have 2 DNs in pipeline // should exclude the dead dn when retry so here we only have 2 DNs in pipeline
assertEquals(2, output.getPipeline().length); assertEquals(2, output.getPipeline().length);
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -47,6 +47,7 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName; import org.junit.rules.TestName;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
@ -70,10 +71,10 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {
@ClassRule @ClassRule
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class); HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class);
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class); LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class);
private static DistributedFileSystem FS; private static DistributedFileSystem FS;


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -31,7 +31,7 @@ public class TestSendBufSizePredictor {
@ClassRule @ClassRule
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSendBufSizePredictor.class); HBaseClassTestRule.forClass(TestSendBufSizePredictor.class);
@Test @Test
public void test() { public void test() {


@ -110,9 +110,9 @@ public final class HBaseKerberosUtils {
/** /**
* Set up configuration for a secure HDFS+HBase cluster. * Set up configuration for a secure HDFS+HBase cluster.
* @param conf configuration object. * @param conf configuration object.
* @param servicePrincipal service principal used by NN, HM and RS. * @param servicePrincipal service principal used by NN, HM and RS.
* @param spnegoPrincipal SPNEGO principal used by NN web UI. * @param spnegoPrincipal SPNEGO principal used by NN web UI.
*/ */
public static void setSecuredConfiguration(Configuration conf, String servicePrincipal, public static void setSecuredConfiguration(Configuration conf, String servicePrincipal,
String spnegoPrincipal) { String spnegoPrincipal) {
@ -156,7 +156,7 @@ public final class HBaseKerberosUtils {
/** /**
* Set up SSL configuration for HDFS NameNode and DataNode. * Set up SSL configuration for HDFS NameNode and DataNode.
* @param utility a HBaseTestingUtility object. * @param utility a HBaseTestingUtility object.
* @param clazz the caller test class. * @param clazz the caller test class.
* @throws Exception if unable to set up SSL configuration * @throws Exception if unable to set up SSL configuration
*/ */
public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class<?> clazz) public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class<?> clazz)


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
@ -69,8 +68,8 @@ public class TestRecoverLeaseFSUtils {
Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
// Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
// invocations will happen pretty fast... the we fall into the longer wait loop). // invocations will happen pretty fast... the we fall into the longer wait loop).
assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 * assertTrue((EnvironmentEdgeManager.currentTime() - startTime)
HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
} }
/** /**


@ -1,4 +1,4 @@
<?xml version="1.0"?> <?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> <project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!-- <!--
/** /**
@ -21,34 +21,14 @@
--> -->
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<parent> <parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version> <version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath> <relativePath>../hbase-build-configuration</relativePath>
</parent> </parent>
<artifactId>hbase-backup</artifactId> <artifactId>hbase-backup</artifactId>
<name>Apache HBase - Backup</name> <name>Apache HBase - Backup</name>
<description>Backup for HBase</description> <description>Backup for HBase</description>
<build>
<plugins>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<dependencies> <dependencies>
<!-- Intra-project dependencies --> <!-- Intra-project dependencies -->
<dependency> <dependency>
@ -173,12 +153,34 @@
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<profiles> <profiles>
<!-- Profile for building against Hadoop 3.0.0. Activate by default --> <!-- Profile for building against Hadoop 3.0.0. Activate by default -->
<profile> <profile>
<id>hadoop-3.0</id> <id>hadoop-3.0</id>
<activation> <activation>
<property><name>!hadoop.profile</name></property> <property>
<name>!hadoop.profile</name>
</property>
</activation> </activation>
<dependencies> <dependencies>
<dependency> <dependency>
@ -213,8 +215,7 @@
<artifactId>lifecycle-mapping</artifactId> <artifactId>lifecycle-mapping</artifactId>
<configuration> <configuration>
<lifecycleMappingMetadata> <lifecycleMappingMetadata>
<pluginExecutions> <pluginExecutions/>
</pluginExecutions>
</lifecycleMappingMetadata> </lifecycleMappingMetadata>
</configuration> </configuration>
</plugin> </plugin>


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupSet; import org.apache.hadoop.hbase.backup.util.BackupSet;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -30,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience;
* The administrative API for HBase Backup. Construct an instance and call {@link #close()} * The administrative API for HBase Backup. Construct an instance and call {@link #close()}
* afterwards. * afterwards.
* <p> * <p>
* BackupAdmin can be used to create backups, restore data from backups and for other * BackupAdmin can be used to create backups, restore data from backups and for other backup-related
* backup-related operations. * operations.
* @since 2.0 * @since 2.0
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
@ -71,9 +69,9 @@ public interface BackupAdmin extends Closeable {
/** /**
* Merge backup images command * Merge backup images command
* @param backupIds array of backup ids of images to be merged * @param backupIds array of backup ids of images to be merged The resulting backup image will
* The resulting backup image will have the same backup id as the most * have the same backup id as the most recent image from a list of images to be
* recent image from a list of images to be merged * merged
* @throws IOException exception * @throws IOException exception
*/ */
void mergeBackups(String[] backupIds) throws IOException; void mergeBackups(String[] backupIds) throws IOException;
@ -120,7 +118,7 @@ public interface BackupAdmin extends Closeable {
/** /**
* Add tables to backup set command * Add tables to backup set command
* @param name name of backup set. * @param name name of backup set.
* @param tables array of tables to be added to this set. * @param tables array of tables to be added to this set.
* @throws IOException exception * @throws IOException exception
*/ */
@ -128,7 +126,7 @@ public interface BackupAdmin extends Closeable {
/** /**
* Remove tables from backup set * Remove tables from backup set
* @param name name of backup set. * @param name name of backup set.
* @param tables array of tables to be removed from this set. * @param tables array of tables to be removed from this set.
* @throws IOException exception * @throws IOException exception
*/ */
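The javadoc above frames BackupAdmin as a construct-then-close API. A minimal usage sketch, assuming the BackupAdminImpl implementation from the impl package and placeholder backup ids (neither appears in this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // BackupAdmin extends Closeable, so try-with-resources supplies the close() call
    // the interface javadoc asks for.
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupAdmin admin = new BackupAdminImpl(conn)) {
      // Merge two incremental images; per the mergeBackups javadoc, the result keeps
      // the id of the most recent image. The ids here are placeholders.
      admin.mergeBackups(new String[] { "backup_1396650096738", "backup_1396650096739" });
    }
  }
}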


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -18,13 +18,11 @@
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private @InterfaceAudience.Private


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupManager;
@ -34,16 +32,16 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface BackupCopyJob extends Configurable { public interface BackupCopyJob extends Configurable {
/** /**
* Copy backup data to destination * Copy backup data to destination
* @param backupInfo context object * @param backupInfo context object
* @param backupManager backup manager * @param backupManager backup manager
* @param conf configuration * @param conf configuration
* @param backupType backup type (FULL or INCREMENTAL) * @param backupType backup type (FULL or INCREMENTAL)
* @param options array of options (implementation-specific) * @param options array of options (implementation-specific)
* @return result (0 - success, -1 failure ) * @return result (0 - success, -1 failure )
* @throws IOException exception * @throws IOException exception
*/ */
int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf, int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
BackupType backupType, String[] options) throws IOException; BackupType backupType, String[] options) throws IOException;
/** /**
* Cancel copy job * Cancel copy job


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -58,9 +58,7 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
/** /**
*
* Command-line entry point for backup operation * Command-line entry point for backup operation
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class BackupDriver extends AbstractHBaseTool { public class BackupDriver extends AbstractHBaseTool {


@ -23,7 +23,6 @@ import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -54,7 +53,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
private Connection connection; private Connection connection;
private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
//used by unit test to skip reading backup:system // used by unit test to skip reading backup:system
private boolean checkForFullyBackedUpTables = true; private boolean checkForFullyBackedUpTables = true;
private List<TableName> fullyBackedUpTables = null; private List<TableName> fullyBackedUpTables = null;
@ -79,8 +78,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
connection = ConnectionFactory.createConnection(conf); connection = ConnectionFactory.createConnection(conf);
} }
try (BackupSystemTable tbl = new BackupSystemTable(connection)) { try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
Map<byte[], List<Path>>[] res = Map<byte[], List<Path>>[] res = tbl.readBulkLoadedFiles(null, tableList);
tbl.readBulkLoadedFiles(null, tableList);
secondPrevReadFromBackupTbl = prevReadFromBackupTbl; secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime(); prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
return getFilenameFromBulkLoad(res); return getFilenameFromBulkLoad(res);
@ -91,6 +89,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
void setCheckForFullyBackedUpTables(boolean b) { void setCheckForFullyBackedUpTables(boolean b) {
checkForFullyBackedUpTables = b; checkForFullyBackedUpTables = b;
} }
@Override @Override
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) { public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
if (conf == null) { if (conf == null) {


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.io.IOException; import java.io.IOException;
@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
@ -59,7 +59,10 @@ public class BackupInfo implements Comparable<BackupInfo> {
* Backup session states * Backup session states
*/ */
public enum BackupState { public enum BackupState {
RUNNING, COMPLETE, FAILED, ANY RUNNING,
COMPLETE,
FAILED,
ANY
} }
/** /**
@ -67,7 +70,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
* BackupState.RUNNING * BackupState.RUNNING
*/ */
public enum BackupPhase { public enum BackupPhase {
REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST REQUEST,
SNAPSHOT,
PREPARE_INCREMENTAL,
SNAPSHOTCOPY,
INCREMENTAL_COPY,
STORE_MANIFEST
} }
/** /**
@ -137,8 +145,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
private Map<TableName, Map<String, Long>> tableSetTimestampMap; private Map<TableName, Map<String, Long>> tableSetTimestampMap;
/** /**
* Previous Region server log timestamps for table set after distributed log roll key - * Previous Region server log timestamps for table set after distributed log roll key - table
* table name, value - map of RegionServer hostname -> last log rolled timestamp * name, value - map of RegionServer hostname -> last log rolled timestamp
*/ */
private Map<TableName, Map<String, Long>> incrTimestampMap; private Map<TableName, Map<String, Long>> incrTimestampMap;
@ -198,8 +206,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
return tableSetTimestampMap; return tableSetTimestampMap;
} }
public void setTableSetTimestampMap(Map<TableName, public void setTableSetTimestampMap(Map<TableName, Map<String, Long>> tableSetTimestampMap) {
Map<String, Long>> tableSetTimestampMap) {
this.tableSetTimestampMap = tableSetTimestampMap; this.tableSetTimestampMap = tableSetTimestampMap;
} }
@ -357,8 +364,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
* Set the new region server log timestamps after distributed log roll * Set the new region server log timestamps after distributed log roll
* @param prevTableSetTimestampMap table timestamp map * @param prevTableSetTimestampMap table timestamp map
*/ */
public void setIncrTimestampMap(Map<TableName, public void setIncrTimestampMap(Map<TableName, Map<String, Long>> prevTableSetTimestampMap) {
Map<String, Long>> prevTableSetTimestampMap) {
this.incrTimestampMap = prevTableSetTimestampMap; this.incrTimestampMap = prevTableSetTimestampMap;
} }
@ -482,8 +488,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name())); context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name()));
} }
context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), context
proto.getBackupId())); .setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), proto.getBackupId()));
if (proto.hasBackupPhase()) { if (proto.hasBackupPhase()) {
context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name())); context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name()));
@ -507,12 +513,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
return map; return map;
} }
private static Map<TableName, Map<String, Long>> getTableSetTimestampMap( private static Map<TableName, Map<String, Long>>
Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) { getTableSetTimestampMap(Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
Map<TableName, Map<String, Long>> tableSetTimestampMap = new HashMap<>(); Map<TableName, Map<String, Long>> tableSetTimestampMap = new HashMap<>();
for (Entry<String, BackupProtos.BackupInfo.RSTimestampMap> entry : map.entrySet()) { for (Entry<String, BackupProtos.BackupInfo.RSTimestampMap> entry : map.entrySet()) {
tableSetTimestampMap tableSetTimestampMap.put(TableName.valueOf(entry.getKey()),
.put(TableName.valueOf(entry.getKey()), entry.getValue().getRsTimestampMap()); entry.getValue().getRsTimestampMap());
} }
return tableSetTimestampMap; return tableSetTimestampMap;
@ -549,7 +555,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
public String getStatusAndProgressAsString() { public String getStatusAndProgressAsString() {
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();
sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) sb.append("id: ").append(getBackupId()).append(" state: ").append(getState())
.append(" progress: ").append(getProgress()); .append(" progress: ").append(getProgress());
return sb.toString(); return sb.toString();
} }
@ -567,7 +573,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
@Override @Override
public int compareTo(BackupInfo o) { public int compareTo(BackupInfo o) {
Long thisTS = Long thisTS =
Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
return thisTS.compareTo(otherTS); return thisTS.compareTo(otherTS);
} }


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configurable;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -32,7 +30,6 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface BackupMergeJob extends Configurable { public interface BackupMergeJob extends Configurable {
/** /**
* Run backup merge operation. * Run backup merge operation.
*
* @param backupIds backup image ids * @param backupIds backup image ids
* @throws IOException if the backup merge operation fails * @throws IOException if the backup merge operation fails
*/ */


@ -7,14 +7,13 @@
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, * Unless required by applicable law or agreed to in writing, software
* software distributed under the License is distributed on an * distributed under the License is distributed on an "AS IS" BASIS,
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* KIND, either express or implied. See the License for the * See the License for the specific language governing permissions and
* specific language governing permissions and limitations * limitations under the License.
* under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
@ -22,7 +21,6 @@ import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Optional; import java.util.Optional;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@ -56,7 +54,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
@Override @Override
public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx, public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths) List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths)
throws IOException { throws IOException {
Configuration cfg = ctx.getEnvironment().getConfiguration(); Configuration cfg = ctx.getEnvironment().getConfiguration();
if (finalPaths == null) { if (finalPaths == null) {
// there is no need to record state // there is no need to record state
@ -67,7 +65,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
return; return;
} }
try (Connection connection = ConnectionFactory.createConnection(cfg); try (Connection connection = ConnectionFactory.createConnection(cfg);
BackupSystemTable tbl = new BackupSystemTable(connection)) { BackupSystemTable tbl = new BackupSystemTable(connection)) {
List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
RegionInfo info = ctx.getEnvironment().getRegionInfo(); RegionInfo info = ctx.getEnvironment().getRegionInfo();
TableName tableName = info.getTable(); TableName tableName = info.getTable();
@ -82,16 +80,17 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
LOG.error("Failed to get tables which have been fully backed up", ioe); LOG.error("Failed to get tables which have been fully backed up", ioe);
} }
} }
@Override @Override
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx, public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException { final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
Configuration cfg = ctx.getEnvironment().getConfiguration(); Configuration cfg = ctx.getEnvironment().getConfiguration();
if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) { if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled"); LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled");
return; return;
} }
try (Connection connection = ConnectionFactory.createConnection(cfg); try (Connection connection = ConnectionFactory.createConnection(cfg);
BackupSystemTable tbl = new BackupSystemTable(connection)) { BackupSystemTable tbl = new BackupSystemTable(connection)) {
List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
RegionInfo info = ctx.getEnvironment().getRegionInfo(); RegionInfo info = ctx.getEnvironment().getRegionInfo();
TableName tableName = info.getTable(); TableName tableName = info.getTable();


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.util.List; import java.util.List;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
@ -45,14 +44,14 @@ public interface BackupRestoreConstants {
int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000; int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000;
/* /*
* Drivers option list * Drivers option list
*/ */
String OPTION_OVERWRITE = "o"; String OPTION_OVERWRITE = "o";
String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists"; String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists";
String OPTION_CHECK = "c"; String OPTION_CHECK = "c";
String OPTION_CHECK_DESC = String OPTION_CHECK_DESC =
"Check restore sequence and dependencies only (does not execute the command)"; "Check restore sequence and dependencies only (does not execute the command)";
String OPTION_SET = "s"; String OPTION_SET = "s";
String OPTION_SET_DESC = "Backup set name"; String OPTION_SET_DESC = "Backup set name";
@ -62,8 +61,8 @@ public interface BackupRestoreConstants {
String OPTION_DEBUG_DESC = "Enable debug loggings"; String OPTION_DEBUG_DESC = "Enable debug loggings";
String OPTION_TABLE = "t"; String OPTION_TABLE = "t";
String OPTION_TABLE_DESC = "Table name. If specified, only backup images," String OPTION_TABLE_DESC =
+ " which contain this table will be listed."; "Table name. If specified, only backup images," + " which contain this table will be listed.";
String OPTION_LIST = "l"; String OPTION_LIST = "l";
String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated."; String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
@ -84,37 +83,32 @@ public interface BackupRestoreConstants {
String OPTION_KEEP = "k"; String OPTION_KEEP = "k";
String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete"; String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete";
String OPTION_TABLE_MAPPING = "m"; String OPTION_TABLE_MAPPING = "m";
String OPTION_TABLE_MAPPING_DESC = String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. "
"A comma separated list of target tables. " + "If specified, each table in <tables> must have a mapping";
+ "If specified, each table in <tables> must have a mapping";
String OPTION_YARN_QUEUE_NAME = "q"; String OPTION_YARN_QUEUE_NAME = "q";
String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on"; String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on"; String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on";
String JOB_NAME_CONF_KEY = "mapreduce.job.name"; String JOB_NAME_CONF_KEY = "mapreduce.job.name";
String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY String BACKUP_CONFIG_STRING =
+ "=true\n" BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins="
+ "hbase.master.logcleaner.plugins=" + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n" + "hbase.procedure.master.classes=YOUR_CLASSES,"
+ "hbase.procedure.master.classes=YOUR_CLASSES," + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n" + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+ "hbase.procedure.regionserver.classes=YOUR_CLASSES," + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+ "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n" + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
+ "hbase.coprocessor.region.classes=YOUR_CLASSES," + "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n"
+ "org.apache.hadoop.hbase.backup.BackupObserver\n" + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
+ "and restart the cluster\n" String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n "
+ "For more information please see http://hbase.apache.org/book.html#backuprestore\n"; + BACKUP_CONFIG_STRING;
String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "+
"in hbase-site.xml, set:\n "
+ BACKUP_CONFIG_STRING;
String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING; String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING;
/* /*
* Delimiter in table name list in restore command * Delimiter in table name list in restore command
*/ */
String TABLENAME_DELIMITER_IN_COMMAND = ","; String TABLENAME_DELIMITER_IN_COMMAND = ",";
@ -123,7 +117,24 @@ public interface BackupRestoreConstants {
String BACKUPID_PREFIX = "backup_"; String BACKUPID_PREFIX = "backup_";
enum BackupCommand { enum BackupCommand {
CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, CREATE,
SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR CANCEL,
DELETE,
DESCRIBE,
HISTORY,
STATUS,
CONVERT,
MERGE,
STOP,
SHOW,
HELP,
PROGRESS,
SET,
SET_ADD,
SET_REMOVE,
SET_DELETE,
SET_DESCRIBE,
SET_LIST,
REPAIR
} }
} }
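The BACKUP_CONFIG_STRING and ENABLE_BACKUP help text above spell out the settings needed to turn the backup feature on. Below is a hedged sketch of the same configuration expressed programmatically; in a real deployment these entries belong in hbase-site.xml, appended to any existing values and followed by a cluster restart as the help text says, and the wrapper class is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;

public class EnableBackupConfSketch {
  public static Configuration enableBackup() {
    Configuration conf = HBaseConfiguration.create();
    // Feature switch referenced as BACKUP_ENABLE_KEY in the help text above.
    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    // Backup plugins and procedure managers; in hbase-site.xml these are appended to the
    // existing "YOUR_PLUGINS"/"YOUR_CLASSES" values rather than replacing them.
    conf.set("hbase.master.logcleaner.plugins",
      "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
    conf.set("hbase.procedure.master.classes",
      "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
    conf.set("hbase.procedure.regionserver.classes",
      "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
    conf.set("hbase.coprocessor.region.classes",
      "org.apache.hadoop.hbase.backup.BackupObserver");
    return conf;
  }
}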


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -26,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience;
/** /**
* Factory implementation for backup/restore related jobs * Factory implementation for backup/restore related jobs
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public final class BackupRestoreFactory { public final class BackupRestoreFactory {
@ -45,7 +44,7 @@ public final class BackupRestoreFactory {
*/ */
public static RestoreJob getRestoreJob(Configuration conf) { public static RestoreJob getRestoreJob(Configuration conf) {
Class<? extends RestoreJob> cls = Class<? extends RestoreJob> cls =
conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class); conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
RestoreJob service = ReflectionUtils.newInstance(cls, conf); RestoreJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf); service.setConf(conf);
return service; return service;
@ -57,9 +56,8 @@ public final class BackupRestoreFactory {
* @return backup copy job instance * @return backup copy job instance
*/ */
public static BackupCopyJob getBackupCopyJob(Configuration conf) { public static BackupCopyJob getBackupCopyJob(Configuration conf) {
Class<? extends BackupCopyJob> cls = Class<? extends BackupCopyJob> cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS,
conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class, MapReduceBackupCopyJob.class, BackupCopyJob.class);
BackupCopyJob.class);
BackupCopyJob service = ReflectionUtils.newInstance(cls, conf); BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf); service.setConf(conf);
return service; return service;
@ -71,9 +69,8 @@ public final class BackupRestoreFactory {
* @return backup merge job instance * @return backup merge job instance
*/ */
public static BackupMergeJob getBackupMergeJob(Configuration conf) { public static BackupMergeJob getBackupMergeJob(Configuration conf) {
Class<? extends BackupMergeJob> cls = Class<? extends BackupMergeJob> cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS,
conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class, MapReduceBackupMergeJob.class, BackupMergeJob.class);
BackupMergeJob.class);
BackupMergeJob service = ReflectionUtils.newInstance(cls, conf); BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf); service.setConf(conf);
return service; return service;


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,11 +15,11 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
@ -29,14 +29,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class BackupTableInfo { public class BackupTableInfo {
/* /*
* Table name for backup * Table name for backup
*/ */
private TableName table; private TableName table;
/* /*
* Snapshot name for offline/online snapshot * Snapshot name for offline/online snapshot
*/ */
private String snapshotName = null; private String snapshotName = null;


@ -1,14 +1,13 @@
/** /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -16,12 +15,10 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup; package org.apache.hadoop.hbase.backup;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -52,15 +49,15 @@ public final class HBackupFileSystem {
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
* @param backupRootDir backup root directory * @param backupRootDir backup root directory
* @param backupId backup id * @param backupId backup id
* @param tableName table name * @param tableName table name
* @return backupPath String for the particular table * @return backupPath String for the particular table
*/ */
public static String public static String getTableBackupDir(String backupRootDir, String backupId,
getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { TableName tableName) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
+ Path.SEPARATOR; + Path.SEPARATOR;
} }
  /**
@@ -75,7 +72,7 @@ public final class HBackupFileSystem {
  /**
   * Get backup tmp directory for backupId
   * @param backupRoot backup root
   * @param backupId   backup id
   * @return backup tmp directory path
   */
  public static Path getBackupTmpDirPathForBackupId(String backupRoot, String backupId) {
@@ -83,7 +80,7 @@ public final class HBackupFileSystem {
  }
  public static String getTableBackupDataDir(String backupRootDir, String backupId,
    TableName tableName) {
    return getTableBackupDir(backupRootDir, backupId, tableName) + Path.SEPARATOR + "data";
  }
@@ -97,8 +94,8 @@ public final class HBackupFileSystem {
   * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
   * @param backupRootPath backup root path
   * @param tableName      table name
   * @param backupId       backup Id
   * @return backupPath for the particular table
   */
  public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
@@ -109,12 +106,12 @@ public final class HBackupFileSystem {
   * Given the backup root dir and the backup id, return the log file location for an incremental
   * backup.
   * @param backupRootDir backup root directory
   * @param backupId      backup id
   * @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738"
   */
  public static String getLogBackupDir(String backupRootDir, String backupId) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
      + HConstants.HREGION_LOGDIR_NAME;
  }
  public static Path getLogBackupPath(String backupRootDir, String backupId) {
@@ -124,37 +121,35 @@ public final class HBackupFileSystem {
  // TODO we do not keep WAL files anymore
  // Move manifest file to other place
  private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
    throws IOException {
    FileSystem fs = backupRootPath.getFileSystem(conf);
    Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
      + BackupManifest.MANIFEST_FILE_NAME);
    if (!fs.exists(manifestPath)) {
      String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME
        + " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId
        + " correspond to previously taken backup ?";
      throw new IOException(errorMsg);
    }
    return manifestPath;
  }
  public static BackupManifest getManifest(Configuration conf, Path backupRootPath, String backupId)
    throws IOException {
    BackupManifest manifest =
      new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId));
    return manifest;
  }
  /**
   * Check whether the backup image path and there is manifest file in the path.
   * @param backupManifestMap If all the manifests are found, then they are put into this map
   * @param tableArray        the tables involved
   * @throws IOException exception
   */
  public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
    TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId)
    throws IOException {
    for (TableName tableName : tableArray) {
      BackupManifest manifest = getManifest(conf, backupRootPath, backupId);
      backupManifestMap.put(tableName, manifest);
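A minimal sketch of validating backup images before a restore, assuming an existing backup at the example root path and backup id used in the Javadoc above; the table name is hypothetical:

  import java.util.HashMap;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.HBackupFileSystem;
  import org.apache.hadoop.hbase.backup.impl.BackupManifest;

  public class ManifestCheckExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      Path backupRootPath = new Path("hdfs://backup.hbase.org:9000/user/biadmin/backup");
      String backupId = "backup_1396650096738";
      TableName[] tables = { TableName.valueOf("default", "t1_dn") };
      HashMap<TableName, BackupManifest> manifests = new HashMap<>();
      // Throws IOException if the manifest file is missing for the image.
      HBackupFileSystem.checkImageManifestExist(manifests, tables, conf, backupRootPath, backupId);
      System.out.println("Found manifests for " + manifests.size() + " table(s)");
    }
  }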


@@ -1,4 +1,4 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -59,9 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
/**
 * Command-line entry point for restore operation
 */
@InterfaceAudience.Private
public class RestoreDriver extends AbstractHBaseTool {
@@ -69,10 +67,10 @@ public class RestoreDriver extends AbstractHBaseTool {
  private CommandLine cmd;
  private static final String USAGE_STRING =
    "Usage: hbase restore <backup_path> <backup_id> [options]\n"
      + " backup_path Path to a backup destination root\n"
      + " backup_id Backup image ID to restore\n"
      + " table(s) Comma-separated list of tables to restore\n";
  private static final String USAGE_FOOTER = "";
@@ -101,19 +99,19 @@ public class RestoreDriver extends AbstractHBaseTool {
    boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
    if (overwrite) {
      LOG.debug("Found -overwrite option in restore command, "
        + "will overwrite to existing table if any in the restore target");
    }
    // whether to only check the dependencies, false by default
    boolean check = cmd.hasOption(OPTION_CHECK);
    if (check) {
      LOG.debug(
        "Found -check option in restore command, " + "will check and verify the dependencies");
    }
    if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) {
      System.err.println(
        "Options -s and -t are mutaully exclusive," + " you can not specify both of them.");
      printToolUsage();
      return -1;
    }
@@ -141,9 +139,9 @@ public class RestoreDriver extends AbstractHBaseTool {
    String backupId = remainArgs[1];
    String tables;
    String tableMapping =
      cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
    try (final Connection conn = ConnectionFactory.createConnection(conf);
      BackupAdmin client = new BackupAdminImpl(conn)) {
      // Check backup set
      if (cmd.hasOption(OPTION_SET)) {
        String setName = cmd.getOptionValue(OPTION_SET);
@@ -155,8 +153,8 @@ public class RestoreDriver extends AbstractHBaseTool {
          return -2;
        }
        if (tables == null) {
          System.out
            .println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
          printToolUsage();
          return -3;
        }
@@ -167,15 +165,16 @@ public class RestoreDriver extends AbstractHBaseTool {
      TableName[] sTableArray = BackupUtils.parseTableNames(tables);
      TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping);
      if (
        sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)
      ) {
        System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
        printToolUsage();
        return -4;
      }
      client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray,
        tTableArray, overwrite));
    } catch (Exception e) {
      LOG.error("Error while running restore backup", e);
      return -5;
@@ -184,7 +183,7 @@ public class RestoreDriver extends AbstractHBaseTool {
  }
  private String getTablesForSet(Connection conn, String name, Configuration conf)
    throws IOException {
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      List<TableName> tables = table.describeBackupSet(name);


@@ -1,4 +1,4 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,11 +15,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
@@ -34,12 +32,12 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface RestoreJob extends Configurable {
  /**
   * Run restore operation
   * @param dirPaths          path array of WAL log directories
   * @param fromTables        from tables
   * @param toTables          to tables
   * @param fullBackupRestore full backup restore
   * @throws IOException if running the job fails
   */
  void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore)
    throws IOException;
}
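A minimal sketch of driving the RestoreJob interface above, assuming the job implementation is obtained from BackupRestoreFactory; the source directory and table names are hypothetical:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
  import org.apache.hadoop.hbase.backup.RestoreJob;

  public class RestoreJobExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      RestoreJob job = BackupRestoreFactory.getRestoreJob(conf);
      // Hypothetical backup directory and table mapping.
      Path[] dirPaths = { new Path("hdfs://backup.hbase.org:9000/user/biadmin/backup/"
        + "backup_1396650096738/default/t1_dn/") };
      TableName[] fromTables = { TableName.valueOf("default", "t1_dn") };
      TableName[] toTables = { TableName.valueOf("default", "t1_dn_restored") };
      job.run(dirPaths, fromTables, toTables, true); // true: full backup restore
    }
  }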


@@ -1,4 +1,4 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information


@@ -1,4 +1,4 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -25,7 +25,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -57,7 +56,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
public class BackupAdminImpl implements BackupAdmin {
  public final static String CHECK_OK = "Checking backup images: OK";
  public final static String CHECK_FAILED =
    "Checking backup images: Failed. Some dependencies are missing for restore";
  private static final Logger LOG = LoggerFactory.getLogger(BackupAdminImpl.class);
  private final Connection conn;
@@ -107,8 +106,8 @@ public class BackupAdminImpl implements BackupAdmin {
        deleteSessionStarted = true;
      } catch (IOException e) {
        LOG.warn("You can not run delete command while active backup session is in progress. \n"
          + "If there is no active backup session running, run backup repair utility to "
          + "restore \nbackup system integrity.");
        return -1;
      }
@@ -158,7 +157,7 @@ public class BackupAdminImpl implements BackupAdmin {
          BackupSystemTable.deleteSnapshot(conn);
          // We still have record with unfinished delete operation
          LOG.error("Delete operation failed, please run backup repair utility to restore "
            + "backup system integrity", e);
          throw e;
        } else {
          LOG.warn("Delete operation succeeded, there were some errors: ", e);
@@ -177,15 +176,15 @@ public class BackupAdminImpl implements BackupAdmin {
  /**
   * Updates incremental backup set for every backupRoot
   * @param tablesMap map [backupRoot: {@code Set<TableName>}]
   * @param table     backup system table
   * @throws IOException if a table operation fails
   */
  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
    throws IOException {
    for (String backupRoot : tablesMap.keySet()) {
      Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
      Map<TableName, ArrayList<BackupInfo>> tableMap =
        table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
      for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
        if (entry.getValue() == null) {
          // No more backups for a table
@@ -283,10 +282,10 @@ public class BackupAdminImpl implements BackupAdmin {
  }
  private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
    throws IOException {
    List<TableName> tables = info.getTableNames();
    LOG.debug(
      "Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString());
    if (tables.contains(tn)) {
      tables.remove(tn);
@@ -306,7 +305,7 @@ public class BackupAdminImpl implements BackupAdmin {
  }
  private List<BackupInfo> getAffectedBackupSessions(BackupInfo backupInfo, TableName tn,
    BackupSystemTable table) throws IOException {
    LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
    long ts = backupInfo.getStartTs();
    List<BackupInfo> list = new ArrayList<>();
@@ -325,7 +324,7 @@ public class BackupAdminImpl implements BackupAdmin {
          list.clear();
        } else {
          LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
            + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
          list.add(info);
        }
      }
@@ -338,7 +337,7 @@ public class BackupAdminImpl implements BackupAdmin {
   * @throws IOException if cleaning up the backup directory fails
   */
  private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
    throws IOException {
    try {
      // clean up the data at target directory
      String targetDir = backupInfo.getBackupRootDir();
@@ -349,9 +348,8 @@ public class BackupAdminImpl implements BackupAdmin {
      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
      Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
        backupInfo.getBackupId(), table));
      if (outputFs.delete(targetDirPath, true)) {
        LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
      } else {
@@ -359,13 +357,13 @@ public class BackupAdminImpl implements BackupAdmin {
      }
    } catch (IOException e1) {
      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
        + "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
      throw e1;
    }
  }
  private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
    throws IOException {
    List<BackupInfo> history = table.getBackupHistory();
    for (BackupInfo info : history) {
      List<TableName> tables = info.getTableNames();
@@ -466,7 +464,7 @@ public class BackupAdminImpl implements BackupAdmin {
  public void addToBackupSet(String name, TableName[] tables) throws IOException {
    String[] tableNames = new String[tables.length];
    try (final BackupSystemTable table = new BackupSystemTable(conn);
      final Admin admin = conn.getAdmin()) {
      for (int i = 0; i < tables.length; i++) {
        tableNames[i] = tables[i].getNameAsString();
        if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
@@ -474,8 +472,8 @@ public class BackupAdminImpl implements BackupAdmin {
        }
      }
      table.addToBackupSet(name, tableNames);
      LOG.info(
        "Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set");
    }
  }
@@ -484,8 +482,8 @@ public class BackupAdminImpl implements BackupAdmin {
    LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      table.removeFromBackupSet(name, toStringArray(tables));
      LOG.info(
        "Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed.");
    }
  }
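A minimal sketch of backup set maintenance through the admin API above, assuming a live cluster connection and an existing table; the set and table names are hypothetical:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupAdmin;
  import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class BackupSetExample {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        BackupAdmin admin = new BackupAdminImpl(conn)) {
        TableName[] tables = { TableName.valueOf("default", "t1_dn") };
        admin.addToBackupSet("nightly", tables); // creates the set if it does not exist
        admin.removeFromBackupSet("nightly", tables);
      }
    }
  }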
@@ -534,9 +532,9 @@ public class BackupAdminImpl implements BackupAdmin {
      }
      if (incrTableSet.isEmpty()) {
        String msg =
          "Incremental backup table set contains no tables. " + "You need to run full backup first "
            + (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
        throw new IOException(msg);
      }
@@ -545,7 +543,7 @@ public class BackupAdminImpl implements BackupAdmin {
      if (!tableList.isEmpty()) {
        String extraTables = StringUtils.join(tableList, ",");
        String msg = "Some tables (" + extraTables + ") haven't gone through full backup. "
          + "Perform full backup on " + extraTables + " first, " + "then retry the command";
        throw new IOException(msg);
      }
    }
@@ -554,13 +552,13 @@ public class BackupAdminImpl implements BackupAdmin {
    if (tableList != null && !tableList.isEmpty()) {
      for (TableName table : tableList) {
        String targetTableBackupDir =
          HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
        Path targetTableBackupDirPath = new Path(targetTableBackupDir);
        FileSystem outputFs =
          FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
        if (outputFs.exists(targetTableBackupDirPath)) {
          throw new IOException(
            "Target backup directory " + targetTableBackupDir + " exists already.");
        }
        outputFs.mkdirs(targetTableBackupDirPath);
      }
@@ -581,8 +579,8 @@ public class BackupAdminImpl implements BackupAdmin {
          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
        } else {
          // Throw exception only in full mode - we try to backup non-existing table
          throw new IOException(
            "Non-existing tables found in the table list: " + nonExistingTableList);
        }
      }
    }
@@ -590,9 +588,9 @@ public class BackupAdminImpl implements BackupAdmin {
    // update table list
    BackupRequest.Builder builder = new BackupRequest.Builder();
    request = builder.withBackupType(request.getBackupType()).withTableList(tableList)
      .withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName())
      .withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth())
      .build();
    TableBackupClient client;
    try {
@@ -608,7 +606,7 @@ public class BackupAdminImpl implements BackupAdmin {
  }
  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
    List<TableName> nonExistingTableList) {
    for (TableName table : nonExistingTableList) {
      tableList.remove(table);
    }
@@ -619,7 +617,7 @@ public class BackupAdminImpl implements BackupAdmin {
  public void mergeBackups(String[] backupIds) throws IOException {
    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      checkIfValidForMerge(backupIds, sysTable);
      // TODO run job on remote cluster
      BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
      job.run(backupIds);
    }
@@ -627,7 +625,6 @@ public class BackupAdminImpl implements BackupAdmin {
  /**
   * Verifies that backup images are valid for merge.
   * <ul>
   * <li>All backups MUST be in the same destination
   * <li>No FULL backups are allowed - only INCREMENTAL
@@ -636,11 +633,11 @@ public class BackupAdminImpl implements BackupAdmin {
   * </ul>
   * <p>
   * @param backupIds list of backup ids
   * @param table     backup system table
   * @throws IOException if the backup image is not valid for merge
   */
  private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table)
    throws IOException {
    String backupRoot = null;
    final Set<TableName> allTables = new HashSet<>();
@@ -656,7 +653,7 @@ public class BackupAdminImpl implements BackupAdmin {
        backupRoot = bInfo.getBackupRootDir();
      } else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
        throw new IOException("Found different backup destinations in a list of a backup sessions "
          + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
      }
      if (bInfo.getType() == BackupType.FULL) {
        throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
@@ -664,7 +661,7 @@ public class BackupAdminImpl implements BackupAdmin {
      if (bInfo.getState() != BackupState.COMPLETE) {
        throw new IOException("Backup image " + backupId
          + " can not be merged becuase of its state: " + bInfo.getState());
      }
      allBackups.add(backupId);
      allTables.addAll(bInfo.getTableNames());
@@ -677,7 +674,7 @@ public class BackupAdminImpl implements BackupAdmin {
      }
    }
    final long startRangeTime = minTime;
    final long endRangeTime = maxTime;
    final String backupDest = backupRoot;
    // Check we have no 'holes' in backup id list
@@ -688,7 +685,7 @@ public class BackupAdminImpl implements BackupAdmin {
    BackupInfo.Filter timeRangeFilter = info -> {
      long time = info.getStartTs();
      return time >= startRangeTime && time <= endRangeTime;
    };
    BackupInfo.Filter tableFilter = info -> {
@@ -699,20 +696,20 @@ public class BackupAdminImpl implements BackupAdmin {
    BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
    BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
    List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter,
      tableFilter, typeFilter, stateFilter);
    if (allInfos.size() != allBackups.size()) {
      // Yes we have at least one hole in backup image sequence
      List<String> missingIds = new ArrayList<>();
      for (BackupInfo info : allInfos) {
        if (allBackups.contains(info.getBackupId())) {
          continue;
        }
        missingIds.add(info.getBackupId());
      }
      String errMsg =
        "Sequence of backup ids has 'holes'. The following backup images must be added:"
          + org.apache.hadoop.util.StringUtils.join(",", missingIds);
      throw new IOException(errMsg);
    }
  }
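A minimal sketch of the filter composition used by the merge validation above, assuming an already opened BackupSystemTable; the destination value is hypothetical:

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.hbase.backup.BackupInfo;
  import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
  import org.apache.hadoop.hbase.backup.BackupType;
  import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;

  public class MergeCandidateQueryExample {
    // Returns COMPLETE incremental backups stored under one destination, as queried above.
    static List<BackupInfo> completedIncrementals(BackupSystemTable table, String backupDest)
      throws IOException {
      BackupInfo.Filter destinationFilter = info -> info.getBackupRootDir().equals(backupDest);
      BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
      BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
      return table.getBackupHistory(-1, destinationFilter, typeFilter, stateFilter);
    }
  }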


@@ -1,4 +1,4 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
@@ -44,7 +43,6 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -80,33 +78,32 @@ public final class BackupCommands {
  public final static String INCORRECT_USAGE = "Incorrect usage";
  public final static String TOP_LEVEL_NOT_ALLOWED =
    "Top level (root) folder is not allowed to be a backup destination";
  public static final String USAGE = "Usage: hbase backup COMMAND [command-specific arguments]\n"
    + "where COMMAND is one of:\n" + " create create a new backup image\n"
    + " delete delete an existing backup image\n"
    + " describe show the detailed information of a backup image\n"
    + " history show history of all successful backups\n"
    + " progress show the progress of the latest backup request\n"
    + " set backup set management\n" + " repair repair backup system table\n"
    + " merge merge backup images\n"
    + "Run \'hbase backup COMMAND -h\' to see help message for each command\n";
  public static final String CREATE_CMD_USAGE =
    "Usage: hbase backup create <type> <backup_path> [options]\n"
      + " type \"full\" to create a full backup image\n"
      + " \"incremental\" to create an incremental backup image\n"
      + " backup_path Full path to store the backup image\n";
  public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress <backup_id>\n"
    + " backup_id Backup image id (optional). If no id specified, the command will show\n"
    + " progress for currently running backup session.";
  public static final String NO_INFO_FOUND = "No info was found for backup id: ";
  public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found.";
  public static final String DESCRIBE_CMD_USAGE =
    "Usage: hbase backup describe <backup_id>\n" + " backup_id Backup image id\n";
  public static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [options]";
@@ -115,14 +112,13 @@ public final class BackupCommands {
  public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n";
  public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
    + " name Backup set name\n" + " tables Comma separated list of tables.\n"
    + "COMMAND is one of:\n" + " add add tables to a set, create a set if needed\n"
    + " remove remove tables from a set\n"
    + " list list all backup sets in the system\n" + " describe describe set\n"
    + " delete delete backup set\n";
  public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n"
    + " backup_ids Comma separated list of backup image ids.\n";
  public static final String USAGE_FOOTER = "";
@@ -281,8 +277,10 @@ public final class BackupCommands {
        throw new IOException(INCORRECT_USAGE);
      }
      if (
        !BackupType.FULL.toString().equalsIgnoreCase(args[1])
          && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])
      ) {
        System.out.println("ERROR: invalid backup type: " + args[1]);
        printUsage();
        throw new IOException(INCORRECT_USAGE);
@@ -301,8 +299,8 @@ public final class BackupCommands {
      // Check if we have both: backup set and list of tables
      if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) {
        System.out
          .println("ERROR: You can specify either backup set or list" + " of tables, but not both");
        printUsage();
        throw new IOException(INCORRECT_USAGE);
      }
@@ -315,20 +313,20 @@ public final class BackupCommands {
        tables = getTablesForSet(setName, getConf());
        if (tables == null) {
          System.out
            .println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
          printUsage();
          throw new IOException(INCORRECT_USAGE);
        }
      } else {
        tables = cmdline.getOptionValue(OPTION_TABLE);
      }
      int bandwidth = cmdline.hasOption(OPTION_BANDWIDTH)
        ? Integer.parseInt(cmdline.getOptionValue(OPTION_BANDWIDTH))
        : -1;
      int workers = cmdline.hasOption(OPTION_WORKERS)
        ? Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS))
        : -1;
      if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) {
        String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME);
@@ -338,13 +336,11 @@ public final class BackupCommands {
      try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
        BackupRequest.Builder builder = new BackupRequest.Builder();
        BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
          .withTableList(
            tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
          .withTargetRootDir(targetBackupDir).withTotalTasks(workers)
          .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
        String backupId = admin.backupTables(request);
        System.out.println("Backup session " + backupId + " finished. Status: SUCCESS");
      } catch (IOException e) {
@@ -506,8 +502,8 @@ public final class BackupCommands {
    public void execute() throws IOException {
      if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) {
        System.out.println(
          "No backup id was specified, " + "will retrieve the most recent (ongoing) session");
      }
      String[] args = cmdline == null ? null : cmdline.getArgs();
      if (args != null && args.length > 2) {
@@ -601,15 +597,15 @@ public final class BackupCommands {
      };
      List<BackupInfo> history = null;
      try (final BackupSystemTable sysTable = new BackupSystemTable(conn);
        BackupAdminImpl admin = new BackupAdminImpl(conn)) {
        history = sysTable.getBackupHistory(-1, dateFilter);
        String[] backupIds = convertToBackupIds(history);
        int deleted = admin.deleteBackups(backupIds);
        System.out.println("Deleted " + deleted + " backups. Total older than " + days + " days: "
          + backupIds.length);
      } catch (IOException e) {
        System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
          + "system integrity");
        throw e;
      }
    }
@@ -631,7 +627,7 @@ public final class BackupCommands {
        System.out.println("Deleted " + deleted + " backups. Total requested: " + backupIds.length);
      } catch (IOException e) {
        System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
          + "system integrity");
        throw e;
      }
@@ -673,14 +669,14 @@ public final class BackupCommands {
      Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
      try (final Connection conn = ConnectionFactory.createConnection(conf);
        final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
        // Failed backup
        BackupInfo backupInfo;
        List<BackupInfo> list = sysTable.getBackupInfos(BackupState.RUNNING);
        if (list.size() == 0) {
          // No failed sessions found
          System.out.println("REPAIR status: no failed sessions found."
            + " Checking failed delete backup operation ...");
          repairFailedBackupDeletionIfAny(conn, sysTable);
          repairFailedBackupMergeIfAny(conn, sysTable);
          return;
@@ -694,10 +690,9 @@ public final class BackupCommands {
        // set overall backup status: failed
        backupInfo.setState(BackupState.FAILED);
        // compose the backup failed data
        String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
          + backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
          + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
        System.out.println(backupFailedData);
        TableBackupClient.cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
        // If backup session is updated to FAILED state - means we
@@ -709,7 +704,7 @@ public final class BackupCommands {
    }
    private void repairFailedBackupDeletionIfAny(Connection conn, BackupSystemTable sysTable)
      throws IOException {
      String[] backupIds = sysTable.getListOfBackupIdsFromDeleteOperation();
      if (backupIds == null || backupIds.length == 0) {
        System.out.println("No failed backup DELETE operation found");
@@ -730,7 +725,7 @@ public final class BackupCommands {
    }
    public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTable sysTable)
      throws IOException {
      String[] backupIds = sysTable.getListOfBackupIdsFromMergeOperation();
      if (backupIds == null || backupIds.length == 0) {
@@ -754,9 +749,11 @@ public final class BackupCommands {
        }
        boolean res = fs.rename(tmpPath, destPath);
        if (!res) {
          throw new IOException(
            "MERGE repair: failed to rename from " + tmpPath + " to " + destPath);
        }
        System.out
          .println("MERGE repair: renamed from " + tmpPath + " to " + destPath + " res=" + res);
      } else {
        checkRemoveBackupImages(fs, backupRoot, backupIds);
      }
@@ -773,16 +770,16 @@ public final class BackupCommands {
    private static void checkRemoveBackupImages(FileSystem fs, String backupRoot,
      String[] backupIds) throws IOException {
      String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);
      for (String backupId : backupIds) {
        if (backupId.equals(mergedBackupId)) {
          continue;
        }
        Path path = HBackupFileSystem.getBackupPath(backupRoot, backupId);
        if (fs.exists(path)) {
          if (!fs.delete(path, true)) {
            System.out.println("MERGE repair removing: " + path + " - FAILED");
          } else {
            System.out.println("MERGE repair removing: " + path + " - OK");
          }
        }
      }
@@ -816,23 +813,23 @@ public final class BackupCommands {
      String[] args = cmdline == null ? null : cmdline.getArgs();
      if (args == null || (args.length != 2)) {
        System.err
          .println("ERROR: wrong number of arguments: " + (args == null ? null : args.length));
        printUsage();
        throw new IOException(INCORRECT_USAGE);
      }
      String[] backupIds = args[1].split(",");
      if (backupIds.length < 2) {
        String msg = "ERROR: can not merge a single backup image. "
          + "Number of images must be greater than 1.";
        System.err.println(msg);
        throw new IOException(msg);
      }
      Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
      try (final Connection conn = ConnectionFactory.createConnection(conf);
        final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
        admin.mergeBackups(backupIds);
      }
    }
@@ -889,7 +886,7 @@ public final class BackupCommands {
      } else {
        // load from backup FS
        history =
          BackupUtils.getHistory(getConf(), n, backupRootPath, tableNameFilter, tableSetFilter);
      }
      for (BackupInfo info : history) {
        System.out.println(info.getShortDescription());


@@ -1,4 +1,4 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;
import org.apache.hadoop.hbase.HBaseIOException;
@@ -48,7 +47,7 @@ public class BackupException extends HBaseIOException {
  /**
   * Exception for the given backup that has no previous root cause
   * @param msg  reason why the backup failed
   * @param desc description of the backup that is being failed
   */
  public BackupException(String msg, BackupInfo desc) {
@@ -58,9 +57,9 @@ public class BackupException extends HBaseIOException {
  /**
   * Exception for the given backup due to another exception
   * @param msg   reason why the backup failed
   * @param cause root cause of the failure
   * @param desc  description of the backup that is being failed
   */
  public BackupException(String msg, Throwable cause, BackupInfo desc) {
    super(msg, cause);
@@ -68,10 +67,9 @@ public class BackupException extends HBaseIOException {
  }
  /**
   * Exception when the description of the backup cannot be determined, due to some other root cause
   * @param message description of what caused the failure
   * @param e       root cause
   */
  public BackupException(String message, Exception e) {
    super(message, e);


@ -1,5 +1,4 @@
/** /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -60,7 +59,7 @@ import org.slf4j.LoggerFactory;
public class BackupManager implements Closeable { public class BackupManager implements Closeable {
// in seconds // in seconds
public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY = public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY =
"hbase.backup.exclusive.op.timeout.seconds"; "hbase.backup.exclusive.op.timeout.seconds";
// In seconds // In seconds
private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600; private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600;
private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class); private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class);
@ -77,10 +76,12 @@ public class BackupManager implements Closeable {
* @throws IOException exception * @throws IOException exception
*/ */
public BackupManager(Connection conn, Configuration conf) throws IOException { public BackupManager(Connection conn, Configuration conf) throws IOException {
if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, if (
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) { !conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
) {
throw new BackupException("HBase backup is not enabled. Check your " throw new BackupException("HBase backup is not enabled. Check your "
+ BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting."); + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting.");
} }
this.conf = conf; this.conf = conf;
this.conn = conn; this.conn = conn;
@ -120,12 +121,13 @@ public class BackupManager implements Closeable {
} }
plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") + conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
BackupHFileCleaner.class.getName()); (plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Added log cleaner: {}. Added master procedure manager: {}." LOG.debug(
+"Added master procedure manager: {}", cleanerClass, masterProcedureClass, "Added log cleaner: {}. Added master procedure manager: {}."
BackupHFileCleaner.class.getName()); + "Added master procedure manager: {}",
cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
} }
} }
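
The plugin registration above boils down to one string concatenation; a small self-contained sketch of that logic, with hypothetical class names:

public class PluginListAppend {
  // Same string logic as the reformatted call above: append a class name to a
  // possibly unset, comma-separated plugin list without adding a leading comma.
  static String appendPlugin(String plugins, String cleanerClassName) {
    return (plugins == null ? "" : plugins + ",") + cleanerClassName;
  }

  public static void main(String[] args) {
    System.out.println(appendPlugin(null, "BackupHFileCleaner"));
    // BackupHFileCleaner
    System.out.println(appendPlugin("TimeToLiveHFileCleaner", "BackupHFileCleaner"));
    // TimeToLiveHFileCleaner,BackupHFileCleaner
  }
}
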
@ -163,8 +165,7 @@ public class BackupManager implements Closeable {
} }
/** /**
* Get configuration * Get configuration n
* @return configuration
*/ */
Configuration getConf() { Configuration getConf() {
return conf; return conf;
@ -186,17 +187,15 @@ public class BackupManager implements Closeable {
/** /**
* Creates a backup info based on input backup request. * Creates a backup info based on input backup request.
* @param backupId backup id * @param backupId backup id
* @param type type * @param type type
* @param tableList table list * @param tableList table list
* @param targetRootDir root dir * @param targetRootDir root dir
* @param workers number of parallel workers * @param workers number of parallel workers
* @param bandwidth bandwidth per worker in MB per sec * @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception
* @return BackupInfo
* @throws BackupException exception
*/ */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList, public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
String targetRootDir, int workers, long bandwidth) throws BackupException { String targetRootDir, int workers, long bandwidth) throws BackupException {
if (targetRootDir == null) { if (targetRootDir == null) {
throw new BackupException("Wrong backup request parameter: target backup root directory"); throw new BackupException("Wrong backup request parameter: target backup root directory");
} }
@ -292,8 +291,8 @@ public class BackupManager implements Closeable {
BackupImage.Builder builder = BackupImage.newBuilder(); BackupImage.Builder builder = BackupImage.newBuilder();
BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
// Only direct ancestors for a backup are required and not entire history of backup for this // Only direct ancestors for a backup are required and not entire history of backup for this
// table resulting in verifying all of the previous backups which is unnecessary and backup // table resulting in verifying all of the previous backups which is unnecessary and backup
@ -320,21 +319,21 @@ public class BackupManager implements Closeable {
if (BackupManifest.canCoverImage(ancestors, image)) { if (BackupManifest.canCoverImage(ancestors, image)) {
LOG.debug("Met the backup boundary of the current table set:"); LOG.debug("Met the backup boundary of the current table set:");
for (BackupImage image1 : ancestors) { for (BackupImage image1 : ancestors) {
LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir()); LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir());
} }
} else { } else {
Path logBackupPath = Path logBackupPath =
HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId()); HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId());
LOG.debug("Current backup has an incremental backup ancestor, " LOG.debug(
+ "touching its image manifest in {}" "Current backup has an incremental backup ancestor, "
+ " to construct the dependency.", logBackupPath.toString()); + "touching its image manifest in {}" + " to construct the dependency.",
logBackupPath.toString());
BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
ancestors.add(lastIncrImage); ancestors.add(lastIncrImage);
LOG.debug( LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}",
"Last dependent incremental backup image: {BackupID={}" + lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
"BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
} }
} }
} }
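
The ancestry hunks above decide which earlier images a new backup depends on. The following simplified, pure-Java model sketches that idea under stated assumptions (a toy Image type, full images only); it is not the actual BackupManager logic, which also chains in the latest incremental image.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AncestorSketch {
  // Simplified stand-in for BackupImage: an id, a type flag and the tables it covers.
  static class Image {
    final String backupId;
    final boolean full;
    final Set<String> tables;
    Image(String backupId, boolean full, Set<String> tables) {
      this.backupId = backupId;
      this.full = full;
      this.tables = tables;
    }
  }

  // Walk previously completed images, newest first, collecting full images until
  // together they cover every table of the current backup (the "backup boundary").
  static List<Image> directAncestors(List<Image> completedNewestFirst, Set<String> currentTables) {
    List<Image> ancestors = new ArrayList<>();
    Set<String> covered = new HashSet<>();
    for (Image img : completedNewestFirst) {
      if (!img.full) {
        continue; // the real code also handles incremental ancestors
      }
      ancestors.add(img);
      covered.addAll(img.tables);
      if (covered.containsAll(currentTables)) {
        break; // boundary reached, as in the "Met the backup boundary" debug message
      }
    }
    return ancestors;
  }
}
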
@ -345,12 +344,12 @@ public class BackupManager implements Closeable {
/** /**
* Get the direct ancestors of this backup for one table involved. * Get the direct ancestors of this backup for one table involved.
* @param backupInfo backup info * @param backupInfo backup info
* @param table table * @param table table
* @return backupImages on the dependency list * @return backupImages on the dependency list
* @throws IOException exception * @throws IOException exception
*/ */
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table) public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
throws IOException { throws IOException {
ArrayList<BackupImage> ancestors = getAncestors(backupInfo); ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
ArrayList<BackupImage> tableAncestors = new ArrayList<>(); ArrayList<BackupImage> tableAncestors = new ArrayList<>();
for (BackupImage image : ancestors) { for (BackupImage image : ancestors) {
@ -399,11 +398,13 @@ public class BackupManager implements Closeable {
// Restore the interrupted status // Restore the interrupted status
Thread.currentThread().interrupt(); Thread.currentThread().interrupt();
} }
if (lastWarningOutputTime == 0 if (
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) { lastWarningOutputTime == 0
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000
) {
lastWarningOutputTime = EnvironmentEdgeManager.currentTime(); lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
LOG.warn("Waiting to acquire backup exclusive lock for {}s", LOG.warn("Waiting to acquire backup exclusive lock for {}s",
+(lastWarningOutputTime - startTime) / 1000); +(lastWarningOutputTime - startTime) / 1000);
} }
} else { } else {
throw e; throw e;
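
The retry loop above throttles its "waiting for the exclusive lock" warning to roughly once a minute. A minimal sketch of the same pattern, simplified to retry on any exception (the real code only retries on the exclusive-operation failure and rethrows everything else):

public class ExclusiveLockWait {
  interface Acquire { void tryAcquire() throws Exception; }

  // Retry until the exclusive operation can be started, but emit the
  // "still waiting" warning at most once per minute.
  static void acquireWithRateLimitedWarning(Acquire op) throws Exception {
    long startTime = System.currentTimeMillis();
    long lastWarningOutputTime = 0;
    while (true) {
      try {
        op.tryAcquire();
        return;
      } catch (Exception e) {
        Thread.sleep(100);
        long now = System.currentTimeMillis();
        if (lastWarningOutputTime == 0 || now - lastWarningOutputTime > 60000) {
          lastWarningOutputTime = now;
          System.err.printf("Waiting to acquire backup exclusive lock for %ds%n",
            (lastWarningOutputTime - startTime) / 1000);
        }
      }
    }
  }
}
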
@ -480,8 +481,8 @@ public class BackupManager implements Closeable {
* @param tables tables * @param tables tables
* @throws IOException exception * @throws IOException exception
*/ */
public void writeRegionServerLogTimestamp(Set<TableName> tables, public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps)
Map<String, Long> newTimestamps) throws IOException { throws IOException {
systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir()); systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir());
} }

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.impl; package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException; import java.io.IOException;
@ -26,7 +25,6 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Map.Entry; import java.util.Map.Entry;
import java.util.TreeMap; import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -50,9 +48,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
/** /**
* Backup manifest contains all the meta data of a backup image. The manifest info will be bundled * Backup manifest contains all the meta data of a backup image. The manifest info will be bundled
* as manifest file together with data. So that each backup image will contain all the info needed * as manifest file together with data. So that each backup image will contain all the info needed
* for restore. BackupManifest is a storage container for BackupImage. * for restore. BackupManifest is a storage container for BackupImage. It is responsible for
* It is responsible for storing/reading backup image data and has some additional utility methods. * storing/reading backup image data and has some additional utility methods.
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class BackupManifest { public class BackupManifest {
@ -126,8 +123,8 @@ public class BackupManifest {
super(); super();
} }
private BackupImage(String backupId, BackupType type, String rootDir, private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList,
List<TableName> tableList, long startTs, long completeTs) { long startTs, long completeTs) {
this.backupId = backupId; this.backupId = backupId;
this.type = type; this.type = type;
this.rootDir = rootDir; this.rootDir = rootDir;
@ -149,9 +146,9 @@ public class BackupManifest {
List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList(); List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();
BackupType type = BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL
im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL ? BackupType.FULL
: BackupType.INCREMENTAL; : BackupType.INCREMENTAL;
BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
for (BackupProtos.BackupImage img : ancestorList) { for (BackupProtos.BackupImage img : ancestorList) {
@ -187,8 +184,8 @@ public class BackupManifest {
return builder.build(); return builder.build();
} }
private static Map<TableName, Map<String, Long>> loadIncrementalTimestampMap( private static Map<TableName, Map<String, Long>>
BackupProtos.BackupImage proto) { loadIncrementalTimestampMap(BackupProtos.BackupImage proto) {
List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList(); List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList();
Map<TableName, Map<String, Long>> incrTimeRanges = new HashMap<>(); Map<TableName, Map<String, Long>> incrTimeRanges = new HashMap<>();
@ -221,13 +218,13 @@ public class BackupManifest {
TableName key = entry.getKey(); TableName key = entry.getKey();
Map<String, Long> value = entry.getValue(); Map<String, Long> value = entry.getValue();
BackupProtos.TableServerTimestamp.Builder tstBuilder = BackupProtos.TableServerTimestamp.Builder tstBuilder =
BackupProtos.TableServerTimestamp.newBuilder(); BackupProtos.TableServerTimestamp.newBuilder();
tstBuilder.setTableName(ProtobufUtil.toProtoTableName(key)); tstBuilder.setTableName(ProtobufUtil.toProtoTableName(key));
for (Map.Entry<String, Long> entry2 : value.entrySet()) { for (Map.Entry<String, Long> entry2 : value.entrySet()) {
String s = entry2.getKey(); String s = entry2.getKey();
BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.Builder stBuilder =
BackupProtos.ServerTimestamp.newBuilder(); BackupProtos.ServerTimestamp.newBuilder();
HBaseProtos.ServerName.Builder snBuilder = HBaseProtos.ServerName.newBuilder(); HBaseProtos.ServerName.Builder snBuilder = HBaseProtos.ServerName.newBuilder();
ServerName sn = ServerName.parseServerName(s); ServerName sn = ServerName.parseServerName(s);
snBuilder.setHostName(sn.getHostname()); snBuilder.setHostName(sn.getHostname());
@ -378,10 +375,9 @@ public class BackupManifest {
*/ */
public BackupManifest(BackupInfo backup) { public BackupManifest(BackupInfo backup) {
BackupImage.Builder builder = BackupImage.newBuilder(); BackupImage.Builder builder = BackupImage.newBuilder();
this.backupImage = this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
builder.withBackupId(backup.getBackupId()).withType(backup.getType()) .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
} }
/** /**
@ -393,16 +389,14 @@ public class BackupManifest {
List<TableName> tables = new ArrayList<TableName>(); List<TableName> tables = new ArrayList<TableName>();
tables.add(table); tables.add(table);
BackupImage.Builder builder = BackupImage.newBuilder(); BackupImage.Builder builder = BackupImage.newBuilder();
this.backupImage = this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
builder.withBackupId(backup.getBackupId()).withType(backup.getType()) .withRootDir(backup.getBackupRootDir()).withTableList(tables)
.withRootDir(backup.getBackupRootDir()).withTableList(tables) .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
} }
/** /**
* Construct manifest from a backup directory. * Construct manifest from a backup directory.
* * @param conf configuration
* @param conf configuration
* @param backupPath backup path * @param backupPath backup path
* @throws IOException if constructing the manifest from the backup directory fails * @throws IOException if constructing the manifest from the backup directory fails
*/ */
@ -412,7 +406,7 @@ public class BackupManifest {
/** /**
* Construct manifest from a backup directory. * Construct manifest from a backup directory.
* @param fs the FileSystem * @param fs the FileSystem
* @param backupPath backup path * @param backupPath backup path
* @throws BackupException exception * @throws BackupException exception
*/ */
@ -449,7 +443,7 @@ public class BackupManifest {
} }
this.backupImage = BackupImage.fromProto(proto); this.backupImage = BackupImage.fromProto(proto);
LOG.debug("Loaded manifest instance from manifest file: " LOG.debug("Loaded manifest instance from manifest file: "
+ BackupUtils.getPath(subFile.getPath())); + BackupUtils.getPath(subFile.getPath()));
return; return;
} }
} }
@ -480,10 +474,10 @@ public class BackupManifest {
byte[] data = backupImage.toProto().toByteArray(); byte[] data = backupImage.toProto().toByteArray();
// write the file, overwrite if already exist // write the file, overwrite if already exist
Path manifestFilePath = Path manifestFilePath =
new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()),
backupImage.getBackupId()), MANIFEST_FILE_NAME); MANIFEST_FILE_NAME);
try (FSDataOutputStream out = try (FSDataOutputStream out =
manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) { manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
out.write(data); out.write(data);
} catch (IOException e) { } catch (IOException e) {
throw new BackupException(e.getMessage()); throw new BackupException(e.getMessage());
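
The manifest is persisted as a single file that is overwritten on each store. A local-filesystem sketch of that behavior (java.nio in place of the HDFS FileSystem used above):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class ManifestWriteSketch {
  // Local-filesystem stand-in for the HDFS write above: serialize the image,
  // then create (or overwrite) the single manifest file under the backup dir.
  static void persist(byte[] manifestBytes, Path backupDir, String manifestFileName)
      throws IOException {
    Files.createDirectories(backupDir);
    Path manifestFile = backupDir.resolve(manifestFileName);
    Files.write(manifestFile, manifestBytes); // CREATE + TRUNCATE_EXISTING by default
  }
}
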
@ -531,8 +525,8 @@ public class BackupManifest {
for (BackupImage image : backupImage.getAncestors()) { for (BackupImage image : backupImage.getAncestors()) {
restoreImages.put(Long.valueOf(image.startTs), image); restoreImages.put(Long.valueOf(image.startTs), image);
} }
return new ArrayList<>(reverse ? (restoreImages.descendingMap().values()) return new ArrayList<>(
: (restoreImages.values())); reverse ? (restoreImages.descendingMap().values()) : (restoreImages.values()));
} }
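
getRestoreDependentList above keys ancestor images by start timestamp and optionally walks them in reverse. The same TreeMap trick in isolation, with hypothetical image ids:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class RestoreOrderSketch {
  // Orders ancestor images by start timestamp; the caller picks ascending or
  // descending traversal, just like the reverse flag above.
  static List<String> orderByStartTs(TreeMap<Long, String> imagesByStartTs, boolean reverse) {
    return new ArrayList<>(
      reverse ? imagesByStartTs.descendingMap().values() : imagesByStartTs.values());
  }

  public static void main(String[] args) {
    TreeMap<Long, String> images = new TreeMap<>();
    images.put(300L, "incr-2");
    images.put(100L, "full-1");
    images.put(200L, "incr-1");
    System.out.println(orderByStartTs(images, false)); // [full-1, incr-1, incr-2]
    System.out.println(orderByStartTs(images, true));  // [incr-2, incr-1, full-1]
  }
}
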
/** /**
@ -614,7 +608,7 @@ public class BackupManifest {
/** /**
* Check whether backup image set could cover a backup image or not. * Check whether backup image set could cover a backup image or not.
* @param fullImages The backup image set * @param fullImages The backup image set
* @param image The target backup image * @param image The target backup image
* @return true if fullImages can cover image, otherwise false * @return true if fullImages can cover image, otherwise false
*/ */
public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) { public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
@ -664,8 +658,8 @@ public class BackupManifest {
info.setStartTs(backupImage.getStartTs()); info.setStartTs(backupImage.getStartTs());
info.setBackupRootDir(backupImage.getRootDir()); info.setBackupRootDir(backupImage.getRootDir());
if (backupImage.getType() == BackupType.INCREMENTAL) { if (backupImage.getType() == BackupType.INCREMENTAL) {
info.setHLogTargetDir(BackupUtils.getLogBackupDir(backupImage.getRootDir(), info.setHLogTargetDir(
backupImage.getBackupId())); BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId()));
} }
return info; return info;
} }

View File

@ -1,5 +1,4 @@
/** /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
@ -232,7 +232,7 @@ public final class BackupSystemTable implements Closeable {
long TIMEOUT = 60000; long TIMEOUT = 60000;
long startTime = EnvironmentEdgeManager.currentTime(); long startTime = EnvironmentEdgeManager.currentTime();
LOG.debug("Backup table {} is not present and available, waiting for it to become so", LOG.debug("Backup table {} is not present and available, waiting for it to become so",
tableName); tableName);
while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) { while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
try { try {
Thread.sleep(100); Thread.sleep(100);
@ -274,15 +274,17 @@ public final class BackupSystemTable implements Closeable {
Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException { Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId); Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
try (Table table = connection.getTable(bulkLoadTableName); try (Table table = connection.getTable(bulkLoadTableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
Result res = null; Result res = null;
Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
while ((res = scanner.next()) != null) { while ((res = scanner.next()) != null) {
res.advance(); res.advance();
byte[] row = CellUtil.cloneRow(res.listCells().get(0)); byte[] row = CellUtil.cloneRow(res.listCells().get(0));
for (Cell cell : res.listCells()) { for (Cell cell : res.listCells()) {
if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, if (
BackupSystemTable.PATH_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0
) {
map.put(row, Bytes.toString(CellUtil.cloneValue(cell))); map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
} }
} }
@ -298,11 +300,11 @@ public final class BackupSystemTable implements Closeable {
* @return array of Map of family to List of Paths * @return array of Map of family to List of Paths
*/ */
public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList) public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
throws IOException { throws IOException {
Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId); Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()]; Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
try (Table table = connection.getTable(bulkLoadTableName); try (Table table = connection.getTable(bulkLoadTableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
Result res = null; Result res = null;
while ((res = scanner.next()) != null) { while ((res = scanner.next()) != null) {
res.advance(); res.advance();
@ -310,14 +312,20 @@ public final class BackupSystemTable implements Closeable {
byte[] fam = null; byte[] fam = null;
String path = null; String path = null;
for (Cell cell : res.listCells()) { for (Cell cell : res.listCells()) {
if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0, if (
BackupSystemTable.TBL_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
BackupSystemTable.TBL_COL.length) == 0
) {
tbl = TableName.valueOf(CellUtil.cloneValue(cell)); tbl = TableName.valueOf(CellUtil.cloneValue(cell));
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0, } else if (
BackupSystemTable.FAM_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0
) {
fam = CellUtil.cloneValue(cell); fam = CellUtil.cloneValue(cell);
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, } else if (
BackupSystemTable.PATH_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0
) {
path = Bytes.toString(CellUtil.cloneValue(cell)); path = Bytes.toString(CellUtil.cloneValue(cell));
} }
} }
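
Several hunks above dispatch on a cell's qualifier with CellUtil.compareQualifiers. A compact sketch of that dispatch; the qualifier byte values here are placeholders, not the real BackupSystemTable constants:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class QualifierDispatchSketch {
  // Placeholder qualifier values; the real ones are the TBL_COL, FAM_COL and
  // PATH_COL constants of BackupSystemTable.
  static final byte[] TBL_COL = Bytes.toBytes("tbl");
  static final byte[] FAM_COL = Bytes.toBytes("fam");
  static final byte[] PATH_COL = Bytes.toBytes("path");

  // Same comparison the reformatted if-blocks above rely on.
  static boolean hasQualifier(Cell cell, byte[] qualifier) {
    return CellUtil.compareQualifiers(cell, qualifier, 0, qualifier.length) == 0;
  }

  // Returns which bulk-load column a cell belongs to, or null if it is none of them.
  static String classify(Cell cell) {
    if (hasQualifier(cell, TBL_COL)) {
      return "table:" + Bytes.toString(CellUtil.cloneValue(cell));
    } else if (hasQualifier(cell, FAM_COL)) {
      return "family:" + Bytes.toString(CellUtil.cloneValue(cell));
    } else if (hasQualifier(cell, PATH_COL)) {
      return "path:" + Bytes.toString(CellUtil.cloneValue(cell));
    }
    return null;
  }
}
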
@ -368,7 +376,7 @@ public final class BackupSystemTable implements Closeable {
* @param finalPaths family and associated hfiles * @param finalPaths family and associated hfiles
*/ */
public void writePathsPostBulkLoad(TableName tabName, byte[] region, public void writePathsPostBulkLoad(TableName tabName, byte[] region,
Map<byte[], List<Path>> finalPaths) throws IOException { Map<byte[], List<Path>> finalPaths) throws IOException {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size() LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size()
+ " entries"); + " entries");
@ -388,14 +396,14 @@ public final class BackupSystemTable implements Closeable {
* @param pairs list of paths for hfiles * @param pairs list of paths for hfiles
*/ */
public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family, public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family,
final List<Pair<Path, Path>> pairs) throws IOException { final List<Pair<Path, Path>> pairs) throws IOException {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug( LOG.debug(
"write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries"); "write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries");
} }
try (Table table = connection.getTable(bulkLoadTableName)) { try (Table table = connection.getTable(bulkLoadTableName)) {
List<Put> puts = List<Put> puts =
BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs); BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs);
table.put(puts); table.put(puts);
LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName); LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
} }
@ -434,7 +442,7 @@ public final class BackupSystemTable implements Closeable {
Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable); Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable);
Map<String, Map<String, List<Pair<String, Boolean>>>> tblMap = map.get(tTable); Map<String, Map<String, List<Pair<String, Boolean>>>> tblMap = map.get(tTable);
try (Table table = connection.getTable(bulkLoadTableName); try (Table table = connection.getTable(bulkLoadTableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
Result res = null; Result res = null;
while ((res = scanner.next()) != null) { while ((res = scanner.next()) != null) {
res.advance(); res.advance();
@ -448,14 +456,20 @@ public final class BackupSystemTable implements Closeable {
rows.add(row); rows.add(row);
String rowStr = Bytes.toString(row); String rowStr = Bytes.toString(row);
region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr); region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0, if (
BackupSystemTable.FAM_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0
) {
fam = Bytes.toString(CellUtil.cloneValue(cell)); fam = Bytes.toString(CellUtil.cloneValue(cell));
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, } else if (
BackupSystemTable.PATH_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0
) {
path = Bytes.toString(CellUtil.cloneValue(cell)); path = Bytes.toString(CellUtil.cloneValue(cell));
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0, } else if (
BackupSystemTable.STATE_COL.length) == 0) { CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
BackupSystemTable.STATE_COL.length) == 0
) {
byte[] state = CellUtil.cloneValue(cell); byte[] state = CellUtil.cloneValue(cell);
if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) { if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
raw = true; raw = true;
@ -489,7 +503,7 @@ public final class BackupSystemTable implements Closeable {
* @param backupId the backup Id * @param backupId the backup Id
*/ */
public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps, public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps,
String backupId) throws IOException { String backupId) throws IOException {
try (Table table = connection.getTable(bulkLoadTableName)) { try (Table table = connection.getTable(bulkLoadTableName)) {
long ts = EnvironmentEdgeManager.currentTime(); long ts = EnvironmentEdgeManager.currentTime();
int cnt = 0; int cnt = 0;
@ -566,7 +580,7 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte. * Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte.
* @param startCode start code * @param startCode start code
* @param backupRoot root directory path to backup * @param backupRoot root directory path to backup
* @throws IOException exception * @throws IOException exception
*/ */
@ -583,7 +597,7 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Exclusive operations are: create, delete, merge * Exclusive operations are: create, delete, merge
* @throws IOException if a table operation fails or an active backup exclusive operation is * @throws IOException if a table operation fails or an active backup exclusive operation is
* already underway * already underway
*/ */
public void startBackupExclusiveOperation() throws IOException { public void startBackupExclusiveOperation() throws IOException {
LOG.debug("Start new backup exclusive operation"); LOG.debug("Start new backup exclusive operation");
@ -591,11 +605,15 @@ public final class BackupSystemTable implements Closeable {
try (Table table = connection.getTable(tableName)) { try (Table table = connection.getTable(tableName)) {
Put put = createPutForStartBackupSession(); Put put = createPutForStartBackupSession();
// First try to put if row does not exist // First try to put if row does not exist
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) if (
.ifNotExists().thenPut(put)) { !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifNotExists().thenPut(put)
) {
// Row exists, try to put if value == ACTIVE_SESSION_NO // Row exists, try to put if value == ACTIVE_SESSION_NO
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) if (
.ifEquals(ACTIVE_SESSION_NO).thenPut(put)) { !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_NO).thenPut(put)
) {
throw new ExclusiveOperationException(); throw new ExclusiveOperationException();
} }
} }
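
startBackupExclusiveOperation above acquires its session row with two checkAndMutate calls. A hedged sketch of that two-step pattern, with the row, family, qualifier and marker values supplied by the caller rather than the real ACTIVE_SESSION_* constants:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class ExclusiveSessionSketch {
  // First try to create the session row if it does not exist, then fall back
  // to flipping an existing "inactive" marker; either successful mutation
  // means the exclusive operation was acquired.
  static boolean tryAcquire(Table table, byte[] row, byte[] family, byte[] qualifier,
      byte[] inactiveMarker, Put acquirePut) throws IOException {
    if (table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists()
        .thenPut(acquirePut)) {
      return true;
    }
    return table.checkAndMutate(row, family).qualifier(qualifier).ifEquals(inactiveMarker)
      .thenPut(acquirePut);
  }
}
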
@ -613,8 +631,10 @@ public final class BackupSystemTable implements Closeable {
try (Table table = connection.getTable(tableName)) { try (Table table = connection.getTable(tableName)) {
Put put = createPutForStopBackupSession(); Put put = createPutForStopBackupSession();
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL) if (
.ifEquals(ACTIVE_SESSION_YES).thenPut(put)) { !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_YES).thenPut(put)
) {
throw new IOException("There is no active backup exclusive operation"); throw new IOException("There is no active backup exclusive operation");
} }
} }
@ -633,13 +653,13 @@ public final class BackupSystemTable implements Closeable {
* @throws IOException exception * @throws IOException exception
*/ */
public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot) public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
throws IOException { throws IOException {
LOG.trace("read region server last roll log result to backup system table"); LOG.trace("read region server last roll log result to backup system table");
Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot); Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot);
try (Table table = connection.getTable(tableName); try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
Result res; Result res;
HashMap<String, Long> rsTimestampMap = new HashMap<>(); HashMap<String, Long> rsTimestampMap = new HashMap<>();
while ((res = scanner.next()) != null) { while ((res = scanner.next()) != null) {
@ -656,13 +676,13 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Writes Region Server last roll log result (timestamp) to backup system table table * Writes Region Server last roll log result (timestamp) to backup system table table
* @param server Region Server name * @param server Region Server name
* @param ts last log timestamp * @param ts last log timestamp
* @param backupRoot root directory path to backup * @param backupRoot root directory path to backup
* @throws IOException exception * @throws IOException exception
*/ */
public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
throws IOException { throws IOException {
LOG.trace("write region server last roll log result to backup system table"); LOG.trace("write region server last roll log result to backup system table");
try (Table table = connection.getTable(tableName)) { try (Table table = connection.getTable(tableName)) {
@ -710,7 +730,7 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Get backup history records filtered by list of filters. * Get backup history records filtered by list of filters.
* @param n max number of records, if n == -1 , then max number is ignored * @param n max number of records, if n == -1 , then max number is ignored
* @param filters list of filters * @param filters list of filters
* @return backup records * @return backup records
* @throws IOException if getting the backup history fails * @throws IOException if getting the backup history fails
@ -793,7 +813,7 @@ public final class BackupSystemTable implements Closeable {
} }
public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set, public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
String backupRoot) throws IOException { String backupRoot) throws IOException {
List<BackupInfo> history = getBackupHistory(backupRoot); List<BackupInfo> history = getBackupHistory(backupRoot);
Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>(); Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) { for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
@ -829,7 +849,7 @@ public final class BackupSystemTable implements Closeable {
ArrayList<BackupInfo> list = new ArrayList<>(); ArrayList<BackupInfo> list = new ArrayList<>();
try (Table table = connection.getTable(tableName); try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
Result res; Result res;
while ((res = scanner.next()) != null) { while ((res = scanner.next()) != null) {
res.advance(); res.advance();
@ -847,16 +867,16 @@ public final class BackupSystemTable implements Closeable {
* Write the current timestamps for each regionserver to backup system table after a successful * Write the current timestamps for each regionserver to backup system table after a successful
* full or incremental backup. The saved timestamp is of the last log file that was backed up * full or incremental backup. The saved timestamp is of the last log file that was backed up
* already. * already.
* @param tables tables * @param tables tables
* @param newTimestamps timestamps * @param newTimestamps timestamps
* @param backupRoot root directory path to backup * @param backupRoot root directory path to backup
* @throws IOException exception * @throws IOException exception
*/ */
public void writeRegionServerLogTimestamp(Set<TableName> tables, public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps,
Map<String, Long> newTimestamps, String backupRoot) throws IOException { String backupRoot) throws IOException {
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("write RS log time stamps to backup system table for tables [" LOG.trace("write RS log time stamps to backup system table for tables ["
+ StringUtils.join(tables, ",") + "]"); + StringUtils.join(tables, ",") + "]");
} }
List<Put> puts = new ArrayList<>(); List<Put> puts = new ArrayList<>();
for (TableName table : tables) { for (TableName table : tables) {
@ -879,7 +899,7 @@ public final class BackupSystemTable implements Closeable {
* @throws IOException exception * @throws IOException exception
*/ */
public Map<TableName, Map<String, Long>> readLogTimestampMap(String backupRoot) public Map<TableName, Map<String, Long>> readLogTimestampMap(String backupRoot)
throws IOException { throws IOException {
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("read RS log ts from backup system table for root=" + backupRoot); LOG.trace("read RS log ts from backup system table for root=" + backupRoot);
} }
@ -888,7 +908,7 @@ public final class BackupSystemTable implements Closeable {
Scan scan = createScanForReadLogTimestampMap(backupRoot); Scan scan = createScanForReadLogTimestampMap(backupRoot);
try (Table table = connection.getTable(tableName); try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
Result res; Result res;
while ((res = scanner.next()) != null) { while ((res = scanner.next()) != null) {
res.advance(); res.advance();
@ -899,11 +919,11 @@ public final class BackupSystemTable implements Closeable {
byte[] data = CellUtil.cloneValue(cell); byte[] data = CellUtil.cloneValue(cell);
if (data == null) { if (data == null) {
throw new IOException("Data of last backup data from backup system table " throw new IOException("Data of last backup data from backup system table "
+ "is empty. Create a backup first."); + "is empty. Create a backup first.");
} }
if (data != null && data.length > 0) { if (data != null && data.length > 0) {
HashMap<String, Long> lastBackup = HashMap<String, Long> lastBackup =
fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
tableTimestampMap.put(tn, lastBackup); tableTimestampMap.put(tn, lastBackup);
} }
} }
@ -912,11 +932,11 @@ public final class BackupSystemTable implements Closeable {
} }
private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table,
Map<String, Long> map) { Map<String, Long> map) {
BackupProtos.TableServerTimestamp.Builder tstBuilder = BackupProtos.TableServerTimestamp.Builder tstBuilder =
BackupProtos.TableServerTimestamp.newBuilder(); BackupProtos.TableServerTimestamp.newBuilder();
tstBuilder tstBuilder
.setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table)); .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
for (Entry<String, Long> entry : map.entrySet()) { for (Entry<String, Long> entry : map.entrySet()) {
BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
@ -939,7 +959,7 @@ public final class BackupSystemTable implements Closeable {
List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList(); List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
for (BackupProtos.ServerTimestamp st : list) { for (BackupProtos.ServerTimestamp st : list) {
ServerName sn = ServerName sn =
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName()); org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName());
map.put(sn.getHostname() + ":" + sn.getPort(), st.getTimestamp()); map.put(sn.getHostname() + ":" + sn.getPort(), st.getTimestamp());
} }
return map; return map;
@ -973,12 +993,12 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Add tables to global incremental backup set * Add tables to global incremental backup set
* @param tables set of tables * @param tables set of tables
* @param backupRoot root directory path to backup * @param backupRoot root directory path to backup
* @throws IOException exception * @throws IOException exception
*/ */
public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot) public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
throws IOException { throws IOException {
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot
+ " tables [" + StringUtils.join(tables, " ") + "]"); + " tables [" + StringUtils.join(tables, " ") + "]");
@ -1019,7 +1039,7 @@ public final class BackupSystemTable implements Closeable {
Scan scan = createScanForBackupHistory(); Scan scan = createScanForBackupHistory();
scan.setCaching(1); scan.setCaching(1);
try (Table table = connection.getTable(tableName); try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) { ResultScanner scanner = table.getScanner(scan)) {
if (scanner.next() != null) { if (scanner.next() != null) {
result = true; result = true;
} }
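
The existence probe above only needs one row, so it caps scanner caching at 1. A minimal helper in the same shape, assuming a caller-provided Table and Scan:

import java.io.IOException;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class HasRowsSketch {
  // Fetch at most one row and report whether the scanner returned anything.
  static boolean hasAnyRow(Table table, Scan scan) throws IOException {
    scan.setCaching(1);
    try (ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next() != null;
    }
  }
}
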
@ -1073,13 +1093,13 @@ public final class BackupSystemTable implements Closeable {
res.advance(); res.advance();
String[] tables = cellValueToBackupSet(res.current()); String[] tables = cellValueToBackupSet(res.current());
return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item)) return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item))
.collect(Collectors.toList()); .collect(Collectors.toList());
} }
} }
/** /**
* Add backup set (list of tables) * Add backup set (list of tables)
* @param name set name * @param name set name
* @param newTables list of tables, comma-separated * @param newTables list of tables, comma-separated
* @throws IOException if a table operation fails * @throws IOException if a table operation fails
*/ */
@ -1105,7 +1125,7 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Remove tables from backup set (list of tables) * Remove tables from backup set (list of tables)
* @param name set name * @param name set name
* @param toRemove list of tables * @param toRemove list of tables
* @throws IOException if a table operation or deleting the backup set fails * @throws IOException if a table operation or deleting the backup set fails
*/ */
@ -1132,7 +1152,7 @@ public final class BackupSystemTable implements Closeable {
table.put(put); table.put(put);
} else if (disjoint.length == tables.length) { } else if (disjoint.length == tables.length) {
LOG.warn("Backup set '" + name + "' does not contain tables [" LOG.warn("Backup set '" + name + "' does not contain tables ["
+ StringUtils.join(toRemove, " ") + "]"); + StringUtils.join(toRemove, " ") + "]");
} else { // disjoint.length == 0 and tables.length >0 } else { // disjoint.length == 0 and tables.length >0
// Delete backup set // Delete backup set
LOG.info("Backup set '" + name + "' is empty. Deleting."); LOG.info("Backup set '" + name + "' is empty. Deleting.");
@ -1176,7 +1196,7 @@ public final class BackupSystemTable implements Closeable {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf)); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf));
ColumnFamilyDescriptorBuilder colBuilder = ColumnFamilyDescriptorBuilder colBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
colBuilder.setMaxVersions(1); colBuilder.setMaxVersions(1);
Configuration config = HBaseConfiguration.create(); Configuration config = HBaseConfiguration.create();
@ -1213,10 +1233,10 @@ public final class BackupSystemTable implements Closeable {
*/ */
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) { public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
TableDescriptorBuilder builder = TableDescriptorBuilder builder =
TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf)); TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));
ColumnFamilyDescriptorBuilder colBuilder = ColumnFamilyDescriptorBuilder colBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
colBuilder.setMaxVersions(1); colBuilder.setMaxVersions(1);
Configuration config = HBaseConfiguration.create(); Configuration config = HBaseConfiguration.create();
int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY, int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
@ -1375,11 +1395,11 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Creates Put to write RS last roll log timestamp map * Creates Put to write RS last roll log timestamp map
* @param table table * @param table table
* @param smap map, containing RS:ts * @param smap map, containing RS:ts
* @return put operation * @return put operation
*/ */
private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap,
String backupRoot) { String backupRoot) {
Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString()));
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap); put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap);
return put; return put;
@ -1414,12 +1434,12 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Creates Put to store RS last log result * Creates Put to store RS last log result
* @param server server name * @param server server name
* @param timestamp log roll result (timestamp) * @param timestamp log roll result (timestamp)
* @return put operation * @return put operation
*/ */
private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp,
String backupRoot) { String backupRoot) {
Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server));
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"), put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"),
Bytes.toBytes(timestamp)); Bytes.toBytes(timestamp));
@ -1458,7 +1478,7 @@ public final class BackupSystemTable implements Closeable {
* Creates Put's for bulk load resulting from running LoadIncrementalHFiles * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
*/ */
static List<Put> createPutForCommittedBulkload(TableName table, byte[] region, static List<Put> createPutForCommittedBulkload(TableName table, byte[] region,
Map<byte[], List<Path>> finalPaths) { Map<byte[], List<Path>> finalPaths) {
List<Put> puts = new ArrayList<>(); List<Put> puts = new ArrayList<>();
for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) { for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) {
for (Path path : entry.getValue()) { for (Path path : entry.getValue()) {
@ -1472,8 +1492,8 @@ public final class BackupSystemTable implements Closeable {
put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file)); put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT); put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
puts.add(put); puts.add(put);
LOG.debug( LOG
"writing done bulk path " + file + " for " + table + " " + Bytes.toString(region)); .debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
} }
} }
return puts; return puts;
@ -1538,7 +1558,7 @@ public final class BackupSystemTable implements Closeable {
* Creates Put's for bulk load resulting from running LoadIncrementalHFiles * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
*/ */
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family, static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
final List<Pair<Path, Path>> pairs) { final List<Pair<Path, Path>> pairs) {
List<Put> puts = new ArrayList<>(pairs.size()); List<Put> puts = new ArrayList<>(pairs.size());
for (Pair<Path, Path> pair : pairs) { for (Pair<Path, Path> pair : pairs) {
Path path = pair.getSecond(); Path path = pair.getSecond();
@ -1740,8 +1760,8 @@ public final class BackupSystemTable implements Closeable {
*/ */
static Scan createScanForBulkLoadedFiles(String backupId) { static Scan createScanForBulkLoadedFiles(String backupId) {
Scan scan = new Scan(); Scan scan = new Scan();
byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES byte[] startRow =
: rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM); backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
byte[] stopRow = Arrays.copyOf(startRow, startRow.length); byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
scan.withStartRow(startRow); scan.withStartRow(startRow);
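
createScanForBulkLoadedFiles above turns a row prefix into a [startRow, stopRow) range by bumping the last byte. The arithmetic in isolation (it shares the real code's caveat when the last byte is already 0xFF):

import java.util.Arrays;

public class PrefixStopRowSketch {
  // Derive an exclusive stop row for a prefix scan by copying the start row
  // and incrementing its last byte.
  static byte[] stopRowForPrefix(byte[] startRow) {
    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    return stopRow;
  }

  public static void main(String[] args) {
    byte[] start = { 'b', 'u', 'l', 'k' };
    System.out.println(Arrays.toString(stopRowForPrefix(start))); // [98, 117, 108, 108]
  }
}
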
@ -1752,7 +1772,7 @@ public final class BackupSystemTable implements Closeable {
} }
static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId, static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId,
long ts, int idx) { long ts, int idx) {
Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx)); Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx));
put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName()); put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName());
put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam); put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam);
@ -1798,7 +1818,7 @@ public final class BackupSystemTable implements Closeable {
/** /**
* Creates Put operation to update backup set content * Creates Put operation to update backup set content
* @param name backup set's name * @param name backup set's name
* @param tables list of tables * @param tables list of tables
* @return put operation * @return put operation
*/ */

View File

@ -1,5 +1,4 @@
/** /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -19,7 +18,6 @@
package org.apache.hadoop.hbase.backup.impl; package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException; import java.io.IOException;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private @InterfaceAudience.Private

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.impl; package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY;
@ -28,7 +27,6 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupCopyJob; import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo;
@ -48,7 +46,6 @@ import org.slf4j.LoggerFactory;
/** /**
* Full table backup implementation * Full table backup implementation
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class FullTableBackupClient extends TableBackupClient { public class FullTableBackupClient extends TableBackupClient {
@ -58,7 +55,7 @@ public class FullTableBackupClient extends TableBackupClient {
} }
public FullTableBackupClient(final Connection conn, final String backupId, BackupRequest request) public FullTableBackupClient(final Connection conn, final String backupId, BackupRequest request)
throws IOException { throws IOException {
super(conn, backupId, request); super(conn, backupId, request);
} }
@ -117,7 +114,7 @@ public class FullTableBackupClient extends TableBackupClient {
LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
+ " with reason code " + res); + " with reason code " + res);
} }
conf.unset(JOB_NAME_CONF_KEY); conf.unset(JOB_NAME_CONF_KEY);
@ -127,7 +124,6 @@ public class FullTableBackupClient extends TableBackupClient {
/** /**
* Backup request execution. * Backup request execution.
*
* @throws IOException if the execution of the backup fails * @throws IOException if the execution of the backup fails
*/ */
@Override @Override
@ -163,9 +159,8 @@ public class FullTableBackupClient extends TableBackupClient {
// SNAPSHOT_TABLES: // SNAPSHOT_TABLES:
backupInfo.setPhase(BackupPhase.SNAPSHOT); backupInfo.setPhase(BackupPhase.SNAPSHOT);
for (TableName tableName : tableList) { for (TableName tableName : tableList) {
String snapshotName = String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
"snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
+ tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
snapshotTable(admin, tableName, snapshotName); snapshotTable(admin, tableName, snapshotName);
backupInfo.setSnapshotName(tableName, snapshotName); backupInfo.setSnapshotName(tableName, snapshotName);
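
The snapshot name built above encodes a timestamp plus the table's namespace and qualifier. A tiny sketch of that naming scheme with made-up inputs:

public class SnapshotNameSketch {
  // Builds the same kind of snapshot name as the hunk above:
  // snapshot_<timestamp>_<namespace>_<qualifier>.
  static String snapshotName(long nowMillis, String namespace, String qualifier) {
    return "snapshot_" + Long.toString(nowMillis) + "_" + namespace + "_" + qualifier;
  }

  public static void main(String[] args) {
    System.out.println(snapshotName(1650000000000L, "default", "usertable"));
    // snapshot_1650000000000_default_usertable
  }
}
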
@ -187,12 +182,11 @@ public class FullTableBackupClient extends TableBackupClient {
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps); backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
Map<TableName, Map<String, Long>> newTableSetTimestampMap = Map<TableName, Map<String, Long>> newTableSetTimestampMap =
backupManager.readLogTimestampMap(); backupManager.readLogTimestampMap();
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap); backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
Long newStartCode = Long newStartCode =
BackupUtils.getMinValue(BackupUtils BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode); backupManager.writeBackupStartCode(newStartCode);
// backup complete // backup complete
@ -205,11 +199,9 @@ public class FullTableBackupClient extends TableBackupClient {
} }
protected void snapshotTable(Admin admin, TableName tableName, String snapshotName) protected void snapshotTable(Admin admin, TableName tableName, String snapshotName)
throws IOException { throws IOException {
int maxAttempts = int maxAttempts = conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS); int pause = conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
int pause =
conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
int attempts = 0; int attempts = 0;
while (attempts++ < maxAttempts) { while (attempts++ < maxAttempts) {
@ -218,7 +210,7 @@ public class FullTableBackupClient extends TableBackupClient {
return; return;
} catch (IOException ee) { } catch (IOException ee) {
LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName
+ ", sleeping for " + pause + "ms", ee); + ", sleeping for " + pause + "ms", ee);
if (attempts < maxAttempts) { if (attempts < maxAttempts) {
try { try {
Thread.sleep(pause); Thread.sleep(pause);
@ -229,6 +221,6 @@ public class FullTableBackupClient extends TableBackupClient {
} }
} }
} }
throw new IOException("Failed to snapshot table "+ tableName); throw new IOException("Failed to snapshot table " + tableName);
} }
} }
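snapshotTable above wraps the snapshot call in a bounded retry: up to a configured number of attempts with a fixed pause between failures. A minimal sketch of that shape, assuming a hypothetical SnapshotAction interface in place of the Admin call and hard-coded attempt/pause values instead of the BACKUP_MAX_ATTEMPTS_KEY / BACKUP_ATTEMPTS_PAUSE_MS_KEY settings:

import java.io.IOException;

/** Minimal sketch of the bounded retry around snapshot creation; illustrative only. */
public class SnapshotRetrySketch {

  /** Hypothetical stand-in for the Admin snapshot call; not an HBase type. */
  interface SnapshotAction {
    void run() throws IOException;
  }

  static void snapshotWithRetries(SnapshotAction action, int maxAttempts, long pauseMs)
      throws IOException {
    int attempts = 0;
    while (attempts++ < maxAttempts) {
      try {
        action.run();
        return; // success, stop retrying
      } catch (IOException e) {
        System.out.println("Snapshot attempt " + attempts + " failed: " + e.getMessage());
        if (attempts < maxAttempts) { // only pause if another attempt follows
          try {
            Thread.sleep(pauseMs);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            break; // give up promptly when interrupted
          }
        }
      }
    }
    throw new IOException("Failed to snapshot after " + maxAttempts + " attempts");
  }

  public static void main(String[] args) throws IOException {
    int[] calls = { 0 };
    // Fails twice, then succeeds, mimicking a transient snapshot error.
    snapshotWithRetries(() -> {
      if (++calls[0] < 3) {
        throw new IOException("transient failure " + calls[0]);
      }
      System.out.println("snapshot taken on attempt " + calls[0]);
    }, 4, 100L);
  }
}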
View File
@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.impl; package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException; import java.io.IOException;
@ -77,11 +76,11 @@ public class IncrementalBackupManager extends BackupManager {
LOG.debug("StartCode " + savedStartCode + "for backupID " + backupInfo.getBackupId()); LOG.debug("StartCode " + savedStartCode + "for backupID " + backupInfo.getBackupId());
} }
// get all new log files from .logs and .oldlogs after last TS and before new timestamp // get all new log files from .logs and .oldlogs after last TS and before new timestamp
if (savedStartCode == null || previousTimestampMins == null if (
|| previousTimestampMins.isEmpty()) { savedStartCode == null || previousTimestampMins == null || previousTimestampMins.isEmpty()
throw new IOException( ) {
"Cannot read any previous back up timestamps from backup system table. " throw new IOException("Cannot read any previous back up timestamps from backup system table. "
+ "In order to create an incremental backup, at least one full backup is needed."); + "In order to create an incremental backup, at least one full backup is needed.");
} }
LOG.info("Execute roll log procedure for incremental backup ..."); LOG.info("Execute roll log procedure for incremental backup ...");
@ -103,9 +102,9 @@ public class IncrementalBackupManager extends BackupManager {
private List<String> excludeProcV2WALs(List<String> logList) { private List<String> excludeProcV2WALs(List<String> logList) {
List<String> list = new ArrayList<>(); List<String> list = new ArrayList<>();
for (int i=0; i < logList.size(); i++) { for (int i = 0; i < logList.size(); i++) {
Path p = new Path(logList.get(i)); Path p = new Path(logList.get(i));
String name = p.getName(); String name = p.getName();
if (name.startsWith(WALProcedureStore.LOG_PREFIX)) { if (name.startsWith(WALProcedureStore.LOG_PREFIX)) {
continue; continue;
@ -119,18 +118,18 @@ public class IncrementalBackupManager extends BackupManager {
/** /**
* For each region server: get all log files newer than the last timestamps but not newer than the * For each region server: get all log files newer than the last timestamps but not newer than the
* newest timestamps. * newest timestamps.
* @param olderTimestamps the timestamp for each region server of the last backup. * @param olderTimestamps the timestamp for each region server of the last backup.
* @param newestTimestamps the timestamp for each region server that the backup should lead to. * @param newestTimestamps the timestamp for each region server that the backup should lead to.
* @param conf the Hadoop and Hbase configuration * @param conf the Hadoop and Hbase configuration
* @param savedStartCode the startcode (timestamp) of last successful backup. * @param savedStartCode the startcode (timestamp) of last successful backup.
* @return a list of log files to be backed up * @return a list of log files to be backed up
* @throws IOException exception * @throws IOException exception
*/ */
private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps, private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode) Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode)
throws IOException { throws IOException {
LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps
+ "\n newestTimestamps: " + newestTimestamps); + "\n newestTimestamps: " + newestTimestamps);
Path walRootDir = CommonFSUtils.getWALRootDir(conf); Path walRootDir = CommonFSUtils.getWALRootDir(conf);
Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME); Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
@ -191,10 +190,10 @@ public class IncrementalBackupManager extends BackupManager {
// or RS is down (was decommisioned). In any case, we treat this // or RS is down (was decommisioned). In any case, we treat this
// log file as eligible for inclusion into incremental backup log list // log file as eligible for inclusion into incremental backup log list
Long ts = newestTimestamps.get(host); Long ts = newestTimestamps.get(host);
if (ts == null) { if (ts == null) {
LOG.warn("ORPHAN log found: " + log + " host=" + host); LOG.warn("ORPHAN log found: " + log + " host=" + host);
LOG.debug("Known hosts (from newestTimestamps):"); LOG.debug("Known hosts (from newestTimestamps):");
for (String s: newestTimestamps.keySet()) { for (String s : newestTimestamps.keySet()) {
LOG.debug(s); LOG.debug(s);
} }
} }
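Per the getLogFilesForNewBackup javadoc above, a WAL file qualifies when it is newer than its host's timestamp from the previous backup but not newer than the newest timestamp recorded after the log roll; files whose host no longer appears in the newest-timestamp map are logged as orphans yet still treated as eligible. A simplified, self-contained sketch of that selection rule, assuming a local WalFile holder instead of parsing host and timestamp out of real WAL file names:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/** Simplified sketch of per-host WAL selection for an incremental backup. */
public class WalSelectionSketch {

  /** Local holder; the real code derives host and timestamp from the WAL file name. */
  static final class WalFile {
    final String host;
    final long timestamp;
    final String path;

    WalFile(String host, long timestamp, String path) {
      this.host = host;
      this.timestamp = timestamp;
      this.path = path;
    }
  }

  static List<String> logsForNewBackup(List<WalFile> wals, Map<String, Long> olderTimestamps,
      Map<String, Long> newestTimestamps) {
    List<String> result = new ArrayList<>();
    for (WalFile wal : wals) {
      Long newest = newestTimestamps.get(wal.host);
      if (newest == null) {
        // Host unknown after the log roll (e.g. a decommissioned region server):
        // warn, but still treat the file as eligible for the backup.
        System.out.println("ORPHAN log found: " + wal.path + " host=" + wal.host);
        result.add(wal.path);
        continue;
      }
      Long older = olderTimestamps.get(wal.host);
      if ((older == null || wal.timestamp > older) && wal.timestamp <= newest) {
        result.add(wal.path);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<WalFile> wals = List.of(new WalFile("rs1", 900L, "/wals/rs1/wal.900"),
      new WalFile("rs1", 1200L, "/wals/rs1/wal.1200"),
      new WalFile("rs2", 1100L, "/wals/rs2/wal.1100"));
    Map<String, Long> older = Map.of("rs1", 1000L, "rs2", 1000L);
    Map<String, Long> newest = Map.of("rs1", 2000L); // rs2 missing -> treated as orphan
    System.out.println(logsForNewBackup(wals, older, newest));
  }
}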
View File
@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.impl; package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
@ -53,9 +52,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* Incremental backup implementation. * Incremental backup implementation. See the {@link #execute() execute} method.
* See the {@link #execute() execute} method.
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class IncrementalTableBackupClient extends TableBackupClient { public class IncrementalTableBackupClient extends TableBackupClient {
@ -65,7 +62,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
} }
public IncrementalTableBackupClient(final Connection conn, final String backupId, public IncrementalTableBackupClient(final Connection conn, final String backupId,
BackupRequest request) throws IOException { BackupRequest request) throws IOException {
super(conn, backupId, request); super(conn, backupId, request);
} }
@ -105,19 +102,19 @@ public class IncrementalTableBackupClient extends TableBackupClient {
} }
/* /*
* Reads bulk load records from backup table, iterates through the records and forms the paths * Reads bulk load records from backup table, iterates through the records and forms the paths for
* for bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination * bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
* @param sTableList list of tables to be backed up * @param sTableList list of tables to be backed up
* @return map of table to List of files * @return map of table to List of files
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList) protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList)
throws IOException { throws IOException {
Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()]; Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
List<String> activeFiles = new ArrayList<>(); List<String> activeFiles = new ArrayList<>();
List<String> archiveFiles = new ArrayList<>(); List<String> archiveFiles = new ArrayList<>();
Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair = Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
backupManager.readBulkloadRows(sTableList); backupManager.readBulkloadRows(sTableList);
Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst(); Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst();
FileSystem tgtFs; FileSystem tgtFs;
try { try {
@ -128,8 +125,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
Path rootdir = CommonFSUtils.getRootDir(conf); Path rootdir = CommonFSUtils.getRootDir(conf);
Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId); Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);
for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry : for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry : map
map.entrySet()) { .entrySet()) {
TableName srcTable = tblEntry.getKey(); TableName srcTable = tblEntry.getKey();
int srcIdx = getIndex(srcTable, sTableList); int srcIdx = getIndex(srcTable, sTableList);
@ -142,14 +139,14 @@ public class IncrementalTableBackupClient extends TableBackupClient {
} }
Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable); Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()), Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
srcTable.getQualifierAsString()); srcTable.getQualifierAsString());
for (Map.Entry<String,Map<String,List<Pair<String, Boolean>>>> regionEntry : for (Map.Entry<String, Map<String, List<Pair<String, Boolean>>>> regionEntry : tblEntry
tblEntry.getValue().entrySet()){ .getValue().entrySet()) {
String regionName = regionEntry.getKey(); String regionName = regionEntry.getKey();
Path regionDir = new Path(tblDir, regionName); Path regionDir = new Path(tblDir, regionName);
// map from family to List of hfiles // map from family to List of hfiles
for (Map.Entry<String,List<Pair<String, Boolean>>> famEntry : for (Map.Entry<String, List<Pair<String, Boolean>>> famEntry : regionEntry.getValue()
regionEntry.getValue().entrySet()) { .entrySet()) {
String fam = famEntry.getKey(); String fam = famEntry.getKey();
Path famDir = new Path(regionDir, fam); Path famDir = new Path(regionDir, fam);
List<Path> files; List<Path> files;
@ -170,7 +167,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
int idx = file.lastIndexOf("/"); int idx = file.lastIndexOf("/");
String filename = file; String filename = file;
if (idx > 0) { if (idx > 0) {
filename = file.substring(idx+1); filename = file.substring(idx + 1);
} }
Path p = new Path(famDir, filename); Path p = new Path(famDir, filename);
Path tgt = new Path(tgtFam, filename); Path tgt = new Path(tgtFam, filename);
@ -183,7 +180,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
LOG.trace("copying " + p + " to " + tgt); LOG.trace("copying " + p + " to " + tgt);
} }
activeFiles.add(p.toString()); activeFiles.add(p.toString());
} else if (fs.exists(archive)){ } else if (fs.exists(archive)) {
LOG.debug("copying archive " + archive + " to " + tgt); LOG.debug("copying archive " + archive + " to " + tgt);
archiveFiles.add(archive.toString()); archiveFiles.add(archive.toString());
} }
@ -199,7 +196,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
} }
private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles) private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles)
throws IOException { throws IOException {
try { try {
// Enable special mode of BackupDistCp // Enable special mode of BackupDistCp
conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5); conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5);
@ -207,8 +204,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId(); String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId();
int attempt = 1; int attempt = 1;
while (activeFiles.size() > 0) { while (activeFiles.size() > 0) {
LOG.info("Copy "+ activeFiles.size() + LOG.info(
" active bulk loaded files. Attempt ="+ (attempt++)); "Copy " + activeFiles.size() + " active bulk loaded files. Attempt =" + (attempt++));
String[] toCopy = new String[activeFiles.size()]; String[] toCopy = new String[activeFiles.size()];
activeFiles.toArray(toCopy); activeFiles.toArray(toCopy);
// Active file can be archived during copy operation, // Active file can be archived during copy operation,
@ -245,7 +242,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
} }
private void updateFileLists(List<String> activeFiles, List<String> archiveFiles) private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
throws IOException { throws IOException {
List<String> newlyArchived = new ArrayList<>(); List<String> newlyArchived = new ArrayList<>();
for (String spath : activeFiles) { for (String spath : activeFiles) {
@ -269,9 +266,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
beginBackup(backupManager, backupInfo); beginBackup(backupManager, backupInfo);
backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL); backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
LOG.debug("For incremental backup, current table set is " LOG.debug("For incremental backup, current table set is "
+ backupManager.getIncrementalBackupTableSet()); + backupManager.getIncrementalBackupTableSet());
newTimestamps = newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
} catch (Exception e) { } catch (Exception e) {
// fail the overall backup and return // fail the overall backup and return
failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
@ -285,8 +281,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf); BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
convertWALsToHFiles(); convertWALsToHFiles();
incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
backupInfo.getBackupRootDir()); backupInfo.getBackupRootDir());
} catch (Exception e) { } catch (Exception e) {
String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId; String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId;
// fail the overall backup and return // fail the overall backup and return
@ -298,8 +294,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
// After this checkpoint, even if entering cancel process, will let the backup finished // After this checkpoint, even if entering cancel process, will let the backup finished
try { try {
// Set the previousTimestampMap which is before this current log roll to the manifest. // Set the previousTimestampMap which is before this current log roll to the manifest.
Map<TableName, Map<String, Long>> previousTimestampMap = Map<TableName, Map<String, Long>> previousTimestampMap = backupManager.readLogTimestampMap();
backupManager.readLogTimestampMap();
backupInfo.setIncrTimestampMap(previousTimestampMap); backupInfo.setIncrTimestampMap(previousTimestampMap);
// The table list in backupInfo is good for both full backup and incremental backup. // The table list in backupInfo is good for both full backup and incremental backup.
@ -307,11 +302,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps); backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
Map<TableName, Map<String, Long>> newTableSetTimestampMap = Map<TableName, Map<String, Long>> newTableSetTimestampMap =
backupManager.readLogTimestampMap(); backupManager.readLogTimestampMap();
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap); backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
Long newStartCode = Long newStartCode =
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode); backupManager.writeBackupStartCode(newStartCode);
handleBulkLoad(backupInfo.getTableNames()); handleBulkLoad(backupInfo.getTableNames());
@ -345,11 +340,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr); int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr);
if (res != 0) { if (res != 0) {
LOG.error("Copy incremental HFile files failed with return code: " + res + "."); LOG.error("Copy incremental HFile files failed with return code: " + res + ".");
throw new IOException("Failed copy from " + StringUtils.join(files, ',') throw new IOException(
+ " to " + backupDest); "Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest);
} }
LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest
+ " to " + backupDest + " finished."); + " finished.");
} finally { } finally {
deleteBulkLoadDirectory(); deleteBulkLoadDirectory();
} }
@ -398,7 +393,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
// a Map task for each file. We use ';' as separator // a Map task for each file. We use ';' as separator
// because WAL file names contains ',' // because WAL file names contains ','
String dirs = StringUtils.join(dirPaths, ';'); String dirs = StringUtils.join(dirPaths, ';');
String jobname = "Incremental_Backup-" + backupId ; String jobname = "Incremental_Backup-" + backupId;
Path bulkOutputPath = getBulkOutputDir(); Path bulkOutputPath = getBulkOutputDir();
conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
@ -410,7 +405,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
try { try {
player.setConf(conf); player.setConf(conf);
int result = player.run(playerArgs); int result = player.run(playerArgs);
if(result != 0) { if (result != 0) {
throw new IOException("WAL Player failed"); throw new IOException("WAL Player failed");
} }
conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
@ -419,7 +414,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
throw e; throw e;
} catch (Exception ee) { } catch (Exception ee) {
throw new IOException("Can not convert from directory " + dirs throw new IOException("Can not convert from directory " + dirs
+ " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee); + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
} }
} }
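handleBulkLoad above has to cope with bulk-loaded HFiles being archived while the backup is running: the active path is copied when it still exists, otherwise the archived location is used. A simplified sketch of that decision, assuming an in-memory set in place of FileSystem.exists checks and entirely made-up paths:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/** Sketch of the active-vs-archived choice made for each bulk-loaded HFile. */
public class BulkLoadCopySketch {

  /** Split bulk-loaded files into still-active ones and ones already moved to the archive. */
  static void classify(List<String> hfiles, Set<String> existingPaths, String archiveDir,
      List<String> activeFiles, List<String> archiveFiles) {
    for (String file : hfiles) {
      String name = file.substring(file.lastIndexOf('/') + 1);
      String archived = archiveDir + "/" + name;
      if (existingPaths.contains(file)) {
        activeFiles.add(file); // still under the table directory
      } else if (existingPaths.contains(archived)) {
        archiveFiles.add(archived); // moved to the archive while the backup was running
      } else {
        System.out.println("Skipping missing file " + file);
      }
    }
  }

  public static void main(String[] args) {
    List<String> active = new ArrayList<>();
    List<String> archive = new ArrayList<>();
    classify(List.of("/hbase/data/ns/t1/r1/f1/hfile-a", "/hbase/data/ns/t1/r1/f1/hfile-b"),
      Set.of("/hbase/data/ns/t1/r1/f1/hfile-a", "/hbase/archive/ns/t1/r1/f1/hfile-b"),
      "/hbase/archive/ns/t1/r1/f1", active, archive);
    System.out.println("active  = " + active);
    System.out.println("archive = " + archive);
  }
}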
View File
@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.impl; package org.apache.hadoop.hbase.backup.impl;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
@ -25,7 +24,6 @@ import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.TreeSet; import java.util.TreeSet;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -47,7 +45,6 @@ import org.slf4j.LoggerFactory;
/** /**
* Restore table implementation * Restore table implementation
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class RestoreTablesClient { public class RestoreTablesClient {
@ -76,7 +73,6 @@ public class RestoreTablesClient {
/** /**
* Validate target tables. * Validate target tables.
*
* @param tTableArray target tables * @param tTableArray target tables
* @param isOverwrite overwrite existing table * @param isOverwrite overwrite existing table
* @throws IOException exception * @throws IOException exception
@ -95,26 +91,25 @@ public class RestoreTablesClient {
} }
} else { } else {
LOG.info("HBase table " + tableName LOG.info("HBase table " + tableName
+ " does not exist. It will be created during restore process"); + " does not exist. It will be created during restore process");
} }
} }
} }
if (existTableList.size() > 0) { if (existTableList.size() > 0) {
if (!isOverwrite) { if (!isOverwrite) {
LOG.error("Existing table (" + existTableList LOG.error("Existing table (" + existTableList + ") found in the restore target, please add "
+ ") found in the restore target, please add " + "\"-o\" as overwrite option in the command if you mean"
+ "\"-o\" as overwrite option in the command if you mean" + " to restore to these existing tables");
+ " to restore to these existing tables"); throw new IOException(
throw new IOException("Existing table found in target while no \"-o\" " "Existing table found in target while no \"-o\" " + "as overwrite option found");
+ "as overwrite option found");
} else { } else {
if (disabledTableList.size() > 0) { if (disabledTableList.size() > 0) {
LOG.error("Found offline table in the restore target, " LOG.error("Found offline table in the restore target, "
+ "please enable them before restore with \"-overwrite\" option"); + "please enable them before restore with \"-overwrite\" option");
LOG.info("Offline table list in restore target: " + disabledTableList); LOG.info("Offline table list in restore target: " + disabledTableList);
throw new IOException( throw new IOException(
"Found offline table in the target when restore with \"-overwrite\" option"); "Found offline table in the target when restore with \"-overwrite\" option");
} }
} }
} }
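The validation above enforces two rules before a restore may proceed: restoring over existing tables requires the overwrite option, and even with overwrite the target tables must be online. A compact sketch of those checks, assuming table names as plain strings; the exception messages paraphrase the log output above:

import java.io.IOException;
import java.util.List;

/** Sketch of the restore-target validation rules; table names are plain strings. */
public class RestoreTargetCheckSketch {

  static void checkTargets(List<String> existingTables, List<String> disabledTables,
      boolean overwrite) throws IOException {
    if (existingTables.isEmpty()) {
      return; // nothing exists yet; tables will be created during the restore
    }
    if (!overwrite) {
      throw new IOException(
        "Existing table found in target while no \"-o\" as overwrite option found: "
          + existingTables);
    }
    if (!disabledTables.isEmpty()) {
      throw new IOException(
        "Found offline table in the target when restore with \"-overwrite\" option: "
          + disabledTables);
    }
  }

  public static void main(String[] args) throws IOException {
    checkTargets(List.of(), List.of(), false); // ok: nothing to overwrite
    checkTargets(List.of("ns:t1"), List.of(), true); // ok: overwrite requested, table online
    try {
      checkTargets(List.of("ns:t1"), List.of(), false); // rejected: needs -o
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
    try {
      checkTargets(List.of("ns:t1"), List.of("ns:t1"), true); // rejected: table offline
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}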
@ -122,16 +117,15 @@ public class RestoreTablesClient {
/** /**
* Restore operation handle each backupImage in array. * Restore operation handle each backupImage in array.
* * @param images array BackupImage
* @param images array BackupImage * @param sTable table to be restored
* @param sTable table to be restored * @param tTable table to be restored to
* @param tTable table to be restored to
* @param truncateIfExists truncate table * @param truncateIfExists truncate table
* @throws IOException exception * @throws IOException exception
*/ */
private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable, private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable,
boolean truncateIfExists) throws IOException { boolean truncateIfExists) throws IOException {
// First image MUST be image of a FULL backup // First image MUST be image of a FULL backup
BackupImage image = images[0]; BackupImage image = images[0];
String rootDir = image.getRootDir(); String rootDir = image.getRootDir();
@ -144,7 +138,7 @@ public class RestoreTablesClient {
BackupManifest manifest = HBackupFileSystem.getManifest(conf, backupRoot, backupId); BackupManifest manifest = HBackupFileSystem.getManifest(conf, backupRoot, backupId);
if (manifest.getType() == BackupType.FULL) { if (manifest.getType() == BackupType.FULL) {
LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image " LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image "
+ tableBackupPath.toString()); + tableBackupPath.toString());
conf.set(JOB_NAME_CONF_KEY, "Full_Restore-" + backupId + "-" + tTable); conf.set(JOB_NAME_CONF_KEY, "Full_Restore-" + backupId + "-" + tTable);
restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists, restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists,
lastIncrBackupId); lastIncrBackupId);
@ -164,7 +158,7 @@ public class RestoreTablesClient {
for (int i = 1; i < images.length; i++) { for (int i = 1; i < images.length; i++) {
BackupImage im = images[i]; BackupImage im = images[i];
String fileBackupDir = String fileBackupDir =
HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable); HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable);
List<Path> list = getFilesRecursively(fileBackupDir); List<Path> list = getFilesRecursively(fileBackupDir);
dirList.addAll(list); dirList.addAll(list);
@ -186,7 +180,7 @@ public class RestoreTablesClient {
} }
private List<Path> getFilesRecursively(String fileBackupDir) private List<Path> getFilesRecursively(String fileBackupDir)
throws IllegalArgumentException, IOException { throws IllegalArgumentException, IOException {
FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration()); FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration());
List<Path> list = new ArrayList<>(); List<Path> list = new ArrayList<>();
RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true); RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true);
@ -202,12 +196,12 @@ public class RestoreTablesClient {
/** /**
* Restore operation. Stage 2: resolved Backup Image dependency * Restore operation. Stage 2: resolved Backup Image dependency
* @param backupManifestMap : tableName, Manifest * @param backupManifestMap : tableName, Manifest
* @param sTableArray The array of tables to be restored * @param sTableArray The array of tables to be restored
* @param tTableArray The array of mapping tables to restore to * @param tTableArray The array of mapping tables to restore to
* @throws IOException exception * @throws IOException exception
*/ */
private void restore(HashMap<TableName, BackupManifest> backupManifestMap, private void restore(HashMap<TableName, BackupManifest> backupManifestMap,
TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
TreeSet<BackupImage> restoreImageSet = new TreeSet<>(); TreeSet<BackupImage> restoreImageSet = new TreeSet<>();
for (int i = 0; i < sTableArray.length; i++) { for (int i = 0; i < sTableArray.length; i++) {
@ -229,8 +223,7 @@ public class RestoreTablesClient {
LOG.info("Restore includes the following image(s):"); LOG.info("Restore includes the following image(s):");
for (BackupImage image : restoreImageSet) { for (BackupImage image : restoreImageSet) {
LOG.info("Backup: " + image.getBackupId() + " " LOG.info("Backup: " + image.getBackupId() + " "
+ HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
table));
} }
} }
} }
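restoreImages above requires the first image to be a full backup and then replays the remaining images as incremental restores in order. A minimal sketch of that ordering rule, assuming small local stand-ins for BackupImage and BackupType rather than the HBase classes:

import java.util.List;

/** Sketch of the image-ordering rule; BackupImage and BackupType are local stand-ins. */
public class RestoreOrderSketch {

  enum BackupType {
    FULL,
    INCREMENTAL
  }

  static final class BackupImage {
    final String backupId;
    final BackupType type;

    BackupImage(String backupId, BackupType type) {
      this.backupId = backupId;
      this.type = type;
    }
  }

  static void restore(List<BackupImage> images) {
    // The first image MUST be a full backup; the rest are replayed incrementally in order.
    BackupImage first = images.get(0);
    if (first.type != BackupType.FULL) {
      throw new IllegalStateException("First image must be a FULL backup: " + first.backupId);
    }
    System.out.println("full restore from " + first.backupId);
    for (int i = 1; i < images.size(); i++) {
      System.out.println("incremental restore from " + images.get(i).backupId);
    }
  }

  public static void main(String[] args) {
    restore(List.of(new BackupImage("backup_1", BackupType.FULL),
      new BackupImage("backup_2", BackupType.INCREMENTAL),
      new BackupImage("backup_3", BackupType.INCREMENTAL)));
  }
}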
View File
@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -44,10 +44,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* Base class for backup operation. Concrete implementation for * Base class for backup operation. Concrete implementation for full and incremental backup are
* full and incremental backup are delegated to corresponding sub-classes: * delegated to corresponding sub-classes: {@link FullTableBackupClient} and
* {@link FullTableBackupClient} and {@link IncrementalTableBackupClient} * {@link IncrementalTableBackupClient}
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public abstract class TableBackupClient { public abstract class TableBackupClient {
@ -72,12 +71,12 @@ public abstract class TableBackupClient {
} }
public TableBackupClient(final Connection conn, final String backupId, BackupRequest request) public TableBackupClient(final Connection conn, final String backupId, BackupRequest request)
throws IOException { throws IOException {
init(conn, backupId, request); init(conn, backupId, request);
} }
public void init(final Connection conn, final String backupId, BackupRequest request) public void init(final Connection conn, final String backupId, BackupRequest request)
throws IOException { throws IOException {
if (request.getBackupType() == BackupType.FULL) { if (request.getBackupType() == BackupType.FULL) {
backupManager = new BackupManager(conn, conn.getConfiguration()); backupManager = new BackupManager(conn, conn.getConfiguration());
} else { } else {
@ -88,9 +87,8 @@ public abstract class TableBackupClient {
this.conn = conn; this.conn = conn;
this.conf = conn.getConfiguration(); this.conf = conn.getConfiguration();
this.fs = CommonFSUtils.getCurrentFileSystem(conf); this.fs = CommonFSUtils.getCurrentFileSystem(conf);
backupInfo = backupInfo = backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
backupManager.createBackupInfo(backupId, request.getBackupType(), tableList, request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
if (tableList == null || tableList.isEmpty()) { if (tableList == null || tableList.isEmpty()) {
this.tableList = new ArrayList<>(backupInfo.getTables()); this.tableList = new ArrayList<>(backupInfo.getTables());
} }
@ -104,7 +102,7 @@ public abstract class TableBackupClient {
* @throws IOException exception * @throws IOException exception
*/ */
protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo) protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo)
throws IOException { throws IOException {
BackupSystemTable.snapshot(conn); BackupSystemTable.snapshot(conn);
backupManager.setBackupInfo(backupInfo); backupManager.setBackupInfo(backupInfo);
@ -136,7 +134,7 @@ public abstract class TableBackupClient {
* @throws IOException exception * @throws IOException exception
*/ */
protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo, protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo,
Configuration conf) throws IOException { Configuration conf) throws IOException {
LOG.debug("Trying to delete snapshot for full backup."); LOG.debug("Trying to delete snapshot for full backup.");
for (String snapshotName : backupInfo.getSnapshotNames()) { for (String snapshotName : backupInfo.getSnapshotNames()) {
if (snapshotName == null) { if (snapshotName == null) {
@ -148,7 +146,7 @@ public abstract class TableBackupClient {
admin.deleteSnapshot(snapshotName); admin.deleteSnapshot(snapshotName);
} }
LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId() LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId()
+ " succeeded."); + " succeeded.");
} }
} }
@ -159,9 +157,8 @@ public abstract class TableBackupClient {
*/ */
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException { protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf);
Path stagingDir = Path stagingDir = new Path(
new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory().toString()));
.toString()));
FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir); FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir);
if (files == null) { if (files == null) {
return; return;
@ -177,30 +174,29 @@ public abstract class TableBackupClient {
} }
/** /**
* Clean up the uncompleted data at target directory if the ongoing backup has already entered * Clean up the uncompleted data at target directory if the ongoing backup has already entered the
* the copy phase. * copy phase.
*/ */
protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) { protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
try { try {
// clean up the uncompleted data at target directory if the ongoing backup has already entered // clean up the uncompleted data at target directory if the ongoing backup has already entered
// the copy phase // the copy phase
LOG.debug("Trying to cleanup up target dir. Current backup phase: " LOG.debug("Trying to cleanup up target dir. Current backup phase: " + backupInfo.getPhase());
+ backupInfo.getPhase()); if (
if (backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY) backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
|| backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY) || backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
|| backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)) { || backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)
FileSystem outputFs = ) {
FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
// now treat one backup as a transaction, clean up data that has been partially copied at // now treat one backup as a transaction, clean up data that has been partially copied at
// table level // table level
for (TableName table : backupInfo.getTables()) { for (TableName table : backupInfo.getTables()) {
Path targetDirPath = Path targetDirPath = new Path(HBackupFileSystem
new Path(HBackupFileSystem.getTableBackupDir(backupInfo.getBackupRootDir(), .getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) { if (outputFs.delete(targetDirPath, true)) {
LOG.debug("Cleaning up uncompleted backup data at " + targetDirPath.toString() LOG.debug(
+ " done."); "Cleaning up uncompleted backup data at " + targetDirPath.toString() + " done.");
} else { } else {
LOG.debug("No data has been copied to " + targetDirPath.toString() + "."); LOG.debug("No data has been copied to " + targetDirPath.toString() + ".");
} }
@ -216,18 +212,18 @@ public abstract class TableBackupClient {
} catch (IOException e1) { } catch (IOException e1) {
LOG.error("Cleaning up uncompleted backup data of " + backupInfo.getBackupId() + " at " LOG.error("Cleaning up uncompleted backup data of " + backupInfo.getBackupId() + " at "
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
} }
} }
/** /**
* Fail the overall backup. * Fail the overall backup.
* @param backupInfo backup info * @param backupInfo backup info
* @param e exception * @param e exception
* @throws IOException exception * @throws IOException exception
*/ */
protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager, protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager,
Exception e, String msg, BackupType type, Configuration conf) throws IOException { Exception e, String msg, BackupType type, Configuration conf) throws IOException {
try { try {
LOG.error(msg + getMessage(e), e); LOG.error(msg + getMessage(e), e);
// If this is a cancel exception, then we've already cleaned. // If this is a cancel exception, then we've already cleaned.
@ -238,10 +234,9 @@ public abstract class TableBackupClient {
// set overall backup status: failed // set overall backup status: failed
backupInfo.setState(BackupState.FAILED); backupInfo.setState(BackupState.FAILED);
// compose the backup failed data // compose the backup failed data
String backupFailedData = String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
"BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs() + backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
+ ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + backupInfo.getPhase() + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
+ ",failedmessage=" + backupInfo.getFailedMsg();
LOG.error(backupFailedData); LOG.error(backupFailedData);
cleanupAndRestoreBackupSystem(conn, backupInfo, conf); cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
// If backup session is updated to FAILED state - means we // If backup session is updated to FAILED state - means we
@ -256,7 +251,7 @@ public abstract class TableBackupClient {
} }
public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo backupInfo, public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo backupInfo,
Configuration conf) throws IOException { Configuration conf) throws IOException {
BackupType type = backupInfo.getType(); BackupType type = backupInfo.getType();
// if full backup, then delete HBase snapshots if there already are snapshots taken // if full backup, then delete HBase snapshots if there already are snapshots taken
// and also clean up export snapshot log files if exist // and also clean up export snapshot log files if exist
@ -278,7 +273,7 @@ public abstract class TableBackupClient {
* @throws IOException exception * @throws IOException exception
*/ */
protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type, protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type,
Configuration conf) throws IOException { Configuration conf) throws IOException {
// set the overall backup phase : store manifest // set the overall backup phase : store manifest
backupInfo.setPhase(BackupPhase.STORE_MANIFEST); backupInfo.setPhase(BackupPhase.STORE_MANIFEST);
@ -365,7 +360,7 @@ public abstract class TableBackupClient {
* @throws IOException exception * @throws IOException exception
*/ */
protected void completeBackup(final Connection conn, BackupInfo backupInfo, protected void completeBackup(final Connection conn, BackupInfo backupInfo,
BackupManager backupManager, BackupType type, Configuration conf) throws IOException { BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
// set the complete timestamp of the overall backup // set the complete timestamp of the overall backup
backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime()); backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime());
// set overall backup status: complete // set overall backup status: complete
@ -376,9 +371,8 @@ public abstract class TableBackupClient {
// compose the backup complete data // compose the backup complete data
String backupCompleteData = String backupCompleteData =
obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() + ",completets="
+ ",completets=" + backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getTotalBytesCopied();
+ backupInfo.getTotalBytesCopied();
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData); LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData);
} }
@ -404,23 +398,26 @@ public abstract class TableBackupClient {
/** /**
* Backup request execution. * Backup request execution.
*
* @throws IOException if the execution of the backup fails * @throws IOException if the execution of the backup fails
*/ */
public abstract void execute() throws IOException; public abstract void execute() throws IOException;
protected Stage getTestStage() { protected Stage getTestStage() {
return Stage.valueOf("stage_"+ conf.getInt(BACKUP_TEST_MODE_STAGE, 0)); return Stage.valueOf("stage_" + conf.getInt(BACKUP_TEST_MODE_STAGE, 0));
} }
protected void failStageIf(Stage stage) throws IOException { protected void failStageIf(Stage stage) throws IOException {
Stage current = getTestStage(); Stage current = getTestStage();
if (current == stage) { if (current == stage) {
throw new IOException("Failed stage " + stage+" in testing"); throw new IOException("Failed stage " + stage + " in testing");
} }
} }
public enum Stage { public enum Stage {
stage_0, stage_1, stage_2, stage_3, stage_4 stage_0,
stage_1,
stage_2,
stage_3,
stage_4
} }
} }
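The Stage enum together with getTestStage and failStageIf above forms a small failure-injection hook for tests: an integer from the configuration is mapped to stage_N via Enum.valueOf and a backup step aborts when it matches. A self-contained sketch, assuming a plain int in place of the Configuration lookup:

import java.io.IOException;

/** Sketch of the stage-based failure injection; the Configuration lookup becomes a plain int. */
public class FailureStageSketch {

  enum Stage {
    stage_0,
    stage_1,
    stage_2,
    stage_3,
    stage_4
  }

  /** Mirrors Stage.valueOf("stage_" + configuredValue) with 0 as the default. */
  static Stage stageFromConfig(int configuredStage) {
    return Stage.valueOf("stage_" + configuredStage);
  }

  /** Aborts the current step when the configured stage matches the one a test wants to fail. */
  static void failStageIf(Stage stage, int configuredStage) throws IOException {
    if (stageFromConfig(configuredStage) == stage) {
      throw new IOException("Failed stage " + stage + " in testing");
    }
  }

  public static void main(String[] args) {
    try {
      failStageIf(Stage.stage_2, 2); // matches, so the simulated step fails
    } catch (IOException e) {
      System.out.println(e.getMessage());
    }
  }
}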
View File
@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -25,7 +25,6 @@ import java.math.BigDecimal;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -125,29 +124,27 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
/** /**
* Update the ongoing backup with new progress. * Update the ongoing backup with new progress.
* @param backupInfo backup info * @param backupInfo backup info
* @param newProgress progress * @param newProgress progress
* @param bytesCopied bytes copied * @param bytesCopied bytes copied
* @throws NoNodeException exception * @throws NoNodeException exception
*/ */
static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress,
int newProgress, long bytesCopied) throws IOException { long bytesCopied) throws IOException {
// compose the new backup progress data, using fake number for now // compose the new backup progress data, using fake number for now
String backupProgressData = newProgress + "%"; String backupProgressData = newProgress + "%";
backupInfo.setProgress(newProgress); backupInfo.setProgress(newProgress);
backupManager.updateBackupInfo(backupInfo); backupManager.updateBackupInfo(backupInfo);
LOG.debug("Backup progress data \"" + backupProgressData LOG.debug("Backup progress data \"" + backupProgressData
+ "\" has been updated to backup system table for " + backupInfo.getBackupId()); + "\" has been updated to backup system table for " + backupInfo.getBackupId());
} }
/** /**
* Extends DistCp for progress updating to backup system table * Extends DistCp for progress updating to backup system table during backup. Using DistCpV2
* during backup. Using DistCpV2 (MAPREDUCE-2765). * (MAPREDUCE-2765). Simply extend it and override execute() method to get the Job reference for
* Simply extend it and override execute() method to get the * progress updating. Only the argument "src1, [src2, [...]] dst" is supported, no more DistCp
* Job reference for progress updating. * options.
* Only the argument "src1, [src2, [...]] dst" is supported,
* no more DistCp options.
*/ */
class BackupDistCp extends DistCp { class BackupDistCp extends DistCp {
@ -156,14 +153,12 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
private BackupManager backupManager; private BackupManager backupManager;
public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backupInfo, public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backupInfo,
BackupManager backupManager) throws Exception { BackupManager backupManager) throws Exception {
super(conf, options); super(conf, options);
this.backupInfo = backupInfo; this.backupInfo = backupInfo;
this.backupManager = backupManager; this.backupManager = backupManager;
} }
@Override @Override
public Job execute() throws Exception { public Job execute() throws Exception {
@ -188,43 +183,41 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
long totalSrcLgth = 0; long totalSrcLgth = 0;
for (Path aSrc : srcs) { for (Path aSrc : srcs) {
totalSrcLgth += totalSrcLgth += BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
} }
// Async call // Async call
job = super.execute(); job = super.execute();
// Update the copy progress to system table every 0.5s if progress value changed // Update the copy progress to system table every 0.5s if progress value changed
int progressReportFreq = int progressReportFreq = MapReduceBackupCopyJob.this.getConf()
MapReduceBackupCopyJob.this.getConf().getInt("hbase.backup.progressreport.frequency", .getInt("hbase.backup.progressreport.frequency", 500);
500);
float lastProgress = progressDone; float lastProgress = progressDone;
while (!job.isComplete()) { while (!job.isComplete()) {
float newProgress = float newProgress =
progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS);
if (newProgress > lastProgress) { if (newProgress > lastProgress) {
BigDecimal progressData = BigDecimal progressData =
new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
String newProgressStr = progressData + "%"; String newProgressStr = progressData + "%";
LOG.info("Progress: " + newProgressStr); LOG.info("Progress: " + newProgressStr);
updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied); updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied);
LOG.debug("Backup progress data updated to backup system table: \"Progress: " LOG.debug("Backup progress data updated to backup system table: \"Progress: "
+ newProgressStr + ".\""); + newProgressStr + ".\"");
lastProgress = newProgress; lastProgress = newProgress;
} }
Thread.sleep(progressReportFreq); Thread.sleep(progressReportFreq);
} }
// update the progress data after copy job complete // update the progress data after copy job complete
float newProgress = float newProgress =
progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS);
BigDecimal progressData = BigDecimal progressData =
new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
String newProgressStr = progressData + "%"; String newProgressStr = progressData + "%";
LOG.info("Progress: " + newProgressStr + " subTask: " + subTaskPercntgInWholeTask LOG.info("Progress: " + newProgressStr + " subTask: " + subTaskPercntgInWholeTask
+ " mapProgress: " + job.mapProgress()); + " mapProgress: " + job.mapProgress());
// accumulate the overall backup progress // accumulate the overall backup progress
progressDone = newProgress; progressDone = newProgress;
@ -232,7 +225,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied); updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied);
LOG.debug("Backup progress data updated to backup system table: \"Progress: " LOG.debug("Backup progress data updated to backup system table: \"Progress: "
+ newProgressStr + " - " + bytesCopied + " bytes copied.\""); + newProgressStr + " - " + bytesCopied + " bytes copied.\"");
} catch (Throwable t) { } catch (Throwable t) {
LOG.error(t.toString(), t); LOG.error(t.toString(), t);
throw t; throw t;
@ -241,8 +234,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
String jobID = job.getJobID().toString(); String jobID = job.getJobID().toString();
job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " LOG.debug(
+ job.isSuccessful()); "DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " + job.isSuccessful());
Counters ctrs = job.getCounters(); Counters ctrs = job.getCounters();
LOG.debug(Objects.toString(ctrs)); LOG.debug(Objects.toString(ctrs));
if (job.isComplete() && !job.isSuccessful()) { if (job.isComplete() && !job.isSuccessful()) {
@ -252,11 +245,11 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
return job; return job;
} }
private Field getInputOptionsField(Class<?> classDistCp) throws IOException{ private Field getInputOptionsField(Class<?> classDistCp) throws IOException {
Field f = null; Field f = null;
try { try {
f = classDistCp.getDeclaredField("inputOptions"); f = classDistCp.getDeclaredField("inputOptions");
} catch(Exception e) { } catch (Exception e) {
// Haddop 3 // Haddop 3
try { try {
f = classDistCp.getDeclaredField("context"); f = classDistCp.getDeclaredField("context");
@ -268,7 +261,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
private List<Path> getSourcePaths(Field fieldInputOptions) throws IOException{ private List<Path> getSourcePaths(Field fieldInputOptions) throws IOException {
Object options; Object options;
try { try {
options = fieldInputOptions.get(this); options = fieldInputOptions.get(this);
@ -282,9 +275,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
return (List<Path>) methodGetSourcePaths.invoke(options); return (List<Path>) methodGetSourcePaths.invoke(options);
} }
} catch (IllegalArgumentException | IllegalAccessException | } catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException
ClassNotFoundException | NoSuchMethodException | | NoSuchMethodException | SecurityException | InvocationTargetException e) {
SecurityException | InvocationTargetException e) {
throw new IOException(e); throw new IOException(e);
} }
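getInputOptionsField above absorbs a DistCp internals change between Hadoop versions by trying the Hadoop 2 field name first and falling back to the Hadoop 3 one. A small sketch of that reflection fallback against a local dummy class; only the two field names are taken from the code above:

import java.lang.reflect.Field;

/** Sketch of the Hadoop 2 vs Hadoop 3 field lookup, demonstrated against a local dummy class. */
public class FieldFallbackSketch {

  /** Dummy with the Hadoop 3 style "context" field instead of Hadoop 2's "inputOptions". */
  static class Hadoop3LikeDistCp {
    private final String context = "DistCpContext";
  }

  static Field optionsField(Class<?> distCpClass) throws NoSuchFieldException {
    try {
      return distCpClass.getDeclaredField("inputOptions"); // Hadoop 2 layout
    } catch (NoSuchFieldException e) {
      return distCpClass.getDeclaredField("context"); // Hadoop 3 layout
    }
  }

  public static void main(String[] args) throws Exception {
    Field f = optionsField(Hadoop3LikeDistCp.class);
    f.setAccessible(true);
    System.out.println(f.getName() + " = " + f.get(new Hadoop3LikeDistCp()));
  }
}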
@ -321,8 +313,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
cfg.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, fileListingPath.toString()); cfg.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, fileListingPath.toString());
cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, totalRecords); cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, totalRecords);
} catch (NoSuchFieldException | SecurityException | IllegalArgumentException } catch (NoSuchFieldException | SecurityException | IllegalArgumentException
| IllegalAccessException | NoSuchMethodException | ClassNotFoundException | IllegalAccessException | NoSuchMethodException | ClassNotFoundException
| InvocationTargetException e) { | InvocationTargetException e) {
throw new IOException(e); throw new IOException(e);
} }
return fileListingPath; return fileListingPath;
@ -340,8 +332,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
} }
private List<Path> getSourceFiles() throws NoSuchFieldException, SecurityException, private List<Path> getSourceFiles() throws NoSuchFieldException, SecurityException,
IllegalArgumentException, IllegalAccessException, NoSuchMethodException, IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
ClassNotFoundException, InvocationTargetException, IOException { ClassNotFoundException, InvocationTargetException, IOException {
Field options = null; Field options = null;
try { try {
options = DistCp.class.getDeclaredField("inputOptions"); options = DistCp.class.getDeclaredField("inputOptions");
@ -352,8 +344,6 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
return getSourcePaths(options); return getSourcePaths(options);
} }
private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException { private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException {
FileSystem fs = pathToListFile.getFileSystem(conf); FileSystem fs = pathToListFile.getFileSystem(conf);
fs.delete(pathToListFile, false); fs.delete(pathToListFile, false);
@ -367,15 +357,15 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
/** /**
* Do backup copy based on different types. * Do backup copy based on different types.
* @param context The backup info * @param context The backup info
* @param conf The hadoop configuration * @param conf The hadoop configuration
* @param copyType The backup copy type * @param copyType The backup copy type
* @param options Options for customized ExportSnapshot or DistCp * @param options Options for customized ExportSnapshot or DistCp
* @throws Exception exception * @throws Exception exception
*/ */
@Override @Override
public int copy(BackupInfo context, BackupManager backupManager, Configuration conf, public int copy(BackupInfo context, BackupManager backupManager, Configuration conf,
BackupType copyType, String[] options) throws IOException { BackupType copyType, String[] options) throws IOException {
int res = 0; int res = 0;
try { try {
@ -391,7 +381,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
setSubTaskPercntgInWholeTask(1f); setSubTaskPercntgInWholeTask(1f);
BackupDistCp distcp = BackupDistCp distcp =
new BackupDistCp(new Configuration(conf), null, context, backupManager); new BackupDistCp(new Configuration(conf), null, context, backupManager);
// Handle a special case where the source file is a single file. // Handle a special case where the source file is a single file.
    // In this case, distcp will not create the target dir. It just takes the // In this case, distcp will not create the target dir. It just takes the
    // target as a file name and copies the source file to the target (as a file name). // target as a file name and copies the source file to the target (as a file name).
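For orientation, a minimal driver sketch for the copy() entry point reformatted above. The copy() signature comes from the hunk; the package locations of BackupCopyJob, BackupInfo and BackupType are assumptions based on the backup module layout, and construction of the BackupInfo/BackupManager arguments is left to the caller.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupManager;

public class BackupCopyDriverSketch {
  // Runs one copy phase and surfaces the job's return code to the caller.
  static int runIncrementalCopy(BackupCopyJob copyJob, BackupInfo info, BackupManager manager,
    Configuration conf, String[] distCpOptions) throws IOException {
    // The options array is passed through to the customized ExportSnapshot/DistCp run.
    return copyJob.copy(info, manager, conf, BackupType.INCREMENTAL, distCpOptions);
  }
}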

View File

@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup.mapreduce; package org.apache.hadoop.hbase.backup.mapreduce;
import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded; import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
@ -52,9 +53,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* MapReduce implementation of {@link BackupMergeJob} * MapReduce implementation of {@link BackupMergeJob} Must be initialized with configuration of a
* Must be initialized with configuration of a backup destination cluster * backup destination cluster
*
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class MapReduceBackupMergeJob implements BackupMergeJob { public class MapReduceBackupMergeJob implements BackupMergeJob {
@ -119,9 +119,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds); Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
String dirs = StringUtils.join(dirPaths, ","); String dirs = StringUtils.join(dirPaths, ",");
Path bulkOutputPath = Path bulkOutputPath = BackupUtils.getBulkOutputDir(
BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]), BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
getConf(), false);
// Delete content if exists // Delete content if exists
if (fs.exists(bulkOutputPath)) { if (fs.exists(bulkOutputPath)) {
if (!fs.delete(bulkOutputPath, true)) { if (!fs.delete(bulkOutputPath, true)) {
@ -136,7 +135,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
int result = player.run(playerArgs); int result = player.run(playerArgs);
if (!succeeded(result)) { if (!succeeded(result)) {
throw new IOException("Can not merge backup images for " + dirs throw new IOException("Can not merge backup images for " + dirs
+ " (check Hadoop/MR and HBase logs). Player return code =" + result); + " (check Hadoop/MR and HBase logs). Player return code =" + result);
} }
// Add to processed table list // Add to processed table list
processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath)); processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
@ -149,14 +148,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
// PHASE 2 (modification of a backup file system) // PHASE 2 (modification of a backup file system)
// Move existing mergedBackupId data into tmp directory // Move existing mergedBackupId data into tmp directory
// we will need it later in case of a failure // we will need it later in case of a failure
Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, Path tmpBackupDir =
mergedBackupId); HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId);
Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId); Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId);
if (!fs.rename(backupDirPath, tmpBackupDir)) { if (!fs.rename(backupDirPath, tmpBackupDir)) {
throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir); throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir);
} else { } else {
LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir); LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir);
} }
// Move new data into backup dest // Move new data into backup dest
for (Pair<TableName, Path> tn : processedTableList) { for (Pair<TableName, Path> tn : processedTableList) {
@ -170,7 +169,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
// Delete tmp dir (Rename back during repair) // Delete tmp dir (Rename back during repair)
if (!fs.delete(tmpBackupDir, true)) { if (!fs.delete(tmpBackupDir, true)) {
// WARN and ignore // WARN and ignore
LOG.warn("Could not delete tmp dir: "+ tmpBackupDir); LOG.warn("Could not delete tmp dir: " + tmpBackupDir);
} }
// Delete old data // Delete old data
deleteBackupImages(backupsToDelete, conn, fs, backupRoot); deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
@ -193,8 +192,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} else { } else {
// backup repair must be run // backup repair must be run
throw new IOException( throw new IOException(
"Backup merge operation failed, run backup repair tool to restore system's integrity", "Backup merge operation failed, run backup repair tool to restore system's integrity", e);
e);
} }
} finally { } finally {
table.close(); table.close();
@ -204,13 +202,13 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
/** /**
   * Copy meta data of a backup session * Copy meta data of a backup session
* @param fs file system * @param fs file system
   * @param tmpBackupDir temp backup directory, where meta is located * @param tmpBackupDir temp backup directory, where meta is located
* @param backupDirPath new path for backup * @param backupDirPath new path for backup
* @throws IOException exception * @throws IOException exception
*/ */
protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath) protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath)
throws IOException { throws IOException {
RemoteIterator<LocatedFileStatus> it = fs.listFiles(tmpBackupDir, true); RemoteIterator<LocatedFileStatus> it = fs.listFiles(tmpBackupDir, true);
List<Path> toKeep = new ArrayList<Path>(); List<Path> toKeep = new ArrayList<Path>();
while (it.hasNext()) { while (it.hasNext()) {
@ -220,8 +218,10 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} }
// Keep meta // Keep meta
String fileName = p.toString(); String fileName = p.toString();
if (fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0 if (
|| fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0) { fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0
|| fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0
) {
toKeep.add(p); toKeep.add(p);
} }
} }
@ -234,8 +234,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
/** /**
* Copy file in DFS from p to newPath * Copy file in DFS from p to newPath
* @param fs file system * @param fs file system
* @param p old path * @param p old path
* @param newPath new path * @param newPath new path
* @throws IOException exception * @throws IOException exception
*/ */
@ -249,12 +249,12 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} }
} }
/** /**
* Converts path before copying * Converts path before copying
* @param p path * @param p path
* @param backupDirPath backup root * @param backupDirPath backup root
* @return converted path * @return converted path
*/ */
protected Path convertToDest(Path p, Path backupDirPath) { protected Path convertToDest(Path p, Path backupDirPath) {
String backupId = backupDirPath.getName(); String backupId = backupDirPath.getName();
Stack<String> stack = new Stack<String>(); Stack<String> stack = new Stack<String>();
@ -300,16 +300,16 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} }
protected void updateBackupManifest(String backupRoot, String mergedBackupId, protected void updateBackupManifest(String backupRoot, String mergedBackupId,
List<String> backupsToDelete) throws IllegalArgumentException, IOException { List<String> backupsToDelete) throws IllegalArgumentException, IOException {
BackupManifest manifest = BackupManifest manifest =
HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId); HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId);
manifest.getBackupImage().removeAncestors(backupsToDelete); manifest.getBackupImage().removeAncestors(backupsToDelete);
// save back // save back
manifest.store(conf); manifest.store(conf);
} }
protected void deleteBackupImages(List<String> backupIds, Connection conn, FileSystem fs, protected void deleteBackupImages(List<String> backupIds, Connection conn, FileSystem fs,
String backupRoot) throws IOException { String backupRoot) throws IOException {
// Delete from backup system table // Delete from backup system table
try (BackupSystemTable table = new BackupSystemTable(conn)) { try (BackupSystemTable table = new BackupSystemTable(conn)) {
for (String backupId : backupIds) { for (String backupId : backupIds) {
@ -339,24 +339,24 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} }
protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath, protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath,
TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException { TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException {
Path dest = Path dest =
new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName)); new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName));
FileStatus[] fsts = fs.listStatus(bulkOutputPath); FileStatus[] fsts = fs.listStatus(bulkOutputPath);
for (FileStatus fst : fsts) { for (FileStatus fst : fsts) {
if (fst.isDirectory()) { if (fst.isDirectory()) {
String family = fst.getPath().getName(); String family = fst.getPath().getName();
Path newDst = new Path(dest, family); Path newDst = new Path(dest, family);
if (fs.exists(newDst)) { if (fs.exists(newDst)) {
if (!fs.delete(newDst, true)) { if (!fs.delete(newDst, true)) {
throw new IOException("failed to delete :"+ newDst); throw new IOException("failed to delete :" + newDst);
} }
} else { } else {
fs.mkdirs(dest); fs.mkdirs(dest);
} }
boolean result = fs.rename(fst.getPath(), dest); boolean result = fs.rename(fst.getPath(), dest);
LOG.debug("MoveData from "+ fst.getPath() +" to "+ dest+" result="+ result); LOG.debug("MoveData from " + fst.getPath() + " to " + dest + " result=" + result);
} }
} }
} }
@ -365,7 +365,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
Set<TableName> allSet = new HashSet<>(); Set<TableName> allSet = new HashSet<>();
try (Connection conn = ConnectionFactory.createConnection(conf); try (Connection conn = ConnectionFactory.createConnection(conf);
BackupSystemTable table = new BackupSystemTable(conn)) { BackupSystemTable table = new BackupSystemTable(conn)) {
for (String backupId : backupIds) { for (String backupId : backupIds) {
BackupInfo bInfo = table.readBackupInfo(backupId); BackupInfo bInfo = table.readBackupInfo(backupId);
@ -378,12 +378,12 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
} }
protected Path[] findInputDirectories(FileSystem fs, String backupRoot, TableName tableName, protected Path[] findInputDirectories(FileSystem fs, String backupRoot, TableName tableName,
String[] backupIds) throws IOException { String[] backupIds) throws IOException {
List<Path> dirs = new ArrayList<>(); List<Path> dirs = new ArrayList<>();
for (String backupId : backupIds) { for (String backupId : backupIds) {
Path fileBackupDirPath = Path fileBackupDirPath =
new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName)); new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName));
if (fs.exists(fileBackupDirPath)) { if (fs.exists(fileBackupDirPath)) {
dirs.add(fileBackupDirPath); dirs.add(fileBackupDirPath);
} else { } else {
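The PHASE 2 hunks above follow a rename-to-temporary, move, then delete-temporary pattern so that a failed merge can still be repaired. A minimal standalone sketch of that pattern with plain FileSystem calls, using placeholder paths and trimmed error handling:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MergeMoveSketch {
  static void replaceBackupDir(Configuration conf, Path backupDir, Path tmpDir, Path newData)
    throws IOException {
    FileSystem fs = backupDir.getFileSystem(conf);
    // Keep the old image reachable under tmpDir until the new data is in place.
    if (!fs.rename(backupDir, tmpDir)) {
      throw new IOException("Failed to rename " + backupDir + " to " + tmpDir);
    }
    // Move the freshly merged data to the backup destination.
    if (!fs.rename(newData, backupDir)) {
      throw new IOException("Failed to move " + newData + " to " + backupDir);
    }
    // Only now drop the saved copy; a failure here is logged and ignored in the diff above.
    if (!fs.delete(tmpDir, true)) {
      System.err.println("Could not delete tmp dir: " + tmpDir);
    }
  }
}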

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.backup.mapreduce; package org.apache.hadoop.hbase.backup.mapreduce;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -69,17 +68,15 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
} }
/** /**
* A mapper that just writes out cells. This one can be used together with * A mapper that just writes out cells. This one can be used together with {@link CellSortReducer}
* {@link CellSortReducer}
*/ */
static class HFileCellMapper extends static class HFileCellMapper extends Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
@Override @Override
public void map(NullWritable key, Cell value, Context context) public void map(NullWritable key, Cell value, Context context)
throws IOException, InterruptedException { throws IOException, InterruptedException {
context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)),
new MapReduceExtendedCell(value)); new MapReduceExtendedCell(value));
} }
@Override @Override
@ -100,9 +97,8 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
String tabName = args[1]; String tabName = args[1];
conf.setStrings(TABLES_KEY, tabName); conf.setStrings(TABLES_KEY, tabName);
conf.set(FileInputFormat.INPUT_DIR, inputDirs); conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job = Job job = Job.getInstance(conf,
Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime()));
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime()));
job.setJarByClass(MapReduceHFileSplitterJob.class); job.setJarByClass(MapReduceHFileSplitterJob.class);
job.setInputFormatClass(HFileInputFormat.class); job.setInputFormatClass(HFileInputFormat.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class);
@ -116,8 +112,8 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
FileOutputFormat.setOutputPath(job, outputDir); FileOutputFormat.setOutputPath(job, outputDir);
job.setMapOutputValueClass(MapReduceExtendedCell.class); job.setMapOutputValueClass(MapReduceExtendedCell.class);
try (Connection conn = ConnectionFactory.createConnection(conf); try (Connection conn = ConnectionFactory.createConnection(conf);
Table table = conn.getTable(tableName); Table table = conn.getTable(tableName);
RegionLocator regionLocator = conn.getRegionLocator(tableName)) { RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
} }
LOG.debug("success configuring load incremental job"); LOG.debug("success configuring load incremental job");
@ -145,9 +141,9 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
System.err.println("Other options:"); System.err.println("Other options:");
System.err.println(" -D " + JOB_NAME_CONF_KEY System.err.println(" -D " + JOB_NAME_CONF_KEY
+ "=jobName - use the specified mapreduce job name for the HFile splitter"); + "=jobName - use the specified mapreduce job name for the HFile splitter");
System.err.println("For performance also consider the following options:\n" System.err.println("For performance also consider the following options:\n"
+ " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false");
} }
/** /**
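For context, a rough sketch of launching the splitter as a Hadoop Tool (the class extends Configured and implements Tool). The two positional arguments mirror the hunk above (input directories, then table name); treating BULK_OUTPUT_CONF_KEY as publicly accessible and the existence of a no-argument constructor are assumptions of this sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
import org.apache.hadoop.util.ToolRunner;

public class HFileSplitterLauncherSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Directory that will receive the region-aligned HFiles.
    conf.set(MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY, "/tmp/hfile-splitter-out");
    int rc = ToolRunner.run(conf, new MapReduceHFileSplitterJob(),
      new String[] { "hdfs:///backup/hfiles-to-split", "my_table" });
    System.exit(rc);
  }
}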

View File

@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -34,13 +34,10 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* MapReduce implementation of {@link RestoreJob} * MapReduce implementation of {@link RestoreJob} For backup restore, it runs
* * {@link MapReduceHFileSplitterJob} job and creates HFiles which are aligned with a region
* For backup restore, it runs {@link MapReduceHFileSplitterJob} job and creates * boundaries of a table being restored. The resulting HFiles then are loaded using HBase bulk load
* HFiles which are aligned with a region boundaries of a table being * tool {@link BulkLoadHFiles}.
* restored.
*
* The resulting HFiles then are loaded using HBase bulk load tool {@link BulkLoadHFiles}.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class MapReduceRestoreJob implements RestoreJob { public class MapReduceRestoreJob implements RestoreJob {
@ -54,7 +51,7 @@ public class MapReduceRestoreJob implements RestoreJob {
@Override @Override
public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNames, public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNames,
boolean fullBackupRestore) throws IOException { boolean fullBackupRestore) throws IOException {
String bulkOutputConfKey; String bulkOutputConfKey;
player = new MapReduceHFileSplitterJob(); player = new MapReduceHFileSplitterJob();
@ -65,24 +62,21 @@ public class MapReduceRestoreJob implements RestoreJob {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Restore " + (fullBackupRestore ? "full" : "incremental") LOG.debug("Restore " + (fullBackupRestore ? "full" : "incremental")
+ " backup from directory " + dirs + " from hbase tables " + " backup from directory " + dirs + " from hbase tables "
+ StringUtils.join(tableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND) + StringUtils.join(tableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND)
+ " to tables " + " to tables "
+ StringUtils.join(newTableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND)); + StringUtils.join(newTableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND));
} }
for (int i = 0; i < tableNames.length; i++) { for (int i = 0; i < tableNames.length; i++) {
LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]); LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]);
Path bulkOutputPath = Path bulkOutputPath = BackupUtils
BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), .getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
getConf());
Configuration conf = getConf(); Configuration conf = getConf();
conf.set(bulkOutputConfKey, bulkOutputPath.toString()); conf.set(bulkOutputConfKey, bulkOutputPath.toString());
String[] playerArgs = { String[] playerArgs = { dirs,
dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i] fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() };
.getNameAsString()
};
int result; int result;
try { try {
@ -97,18 +91,18 @@ public class MapReduceRestoreJob implements RestoreJob {
} }
if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) { if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) {
throw new IOException("Can not restore from backup directory " + dirs + throw new IOException("Can not restore from backup directory " + dirs
" (check Hadoop and HBase logs). Bulk loader returns null"); + " (check Hadoop and HBase logs). Bulk loader returns null");
} }
} else { } else {
throw new IOException("Can not restore from backup directory " + dirs throw new IOException("Can not restore from backup directory " + dirs
+ " (check Hadoop/MR and HBase logs). Player return code =" + result); + " (check Hadoop/MR and HBase logs). Player return code =" + result);
} }
LOG.debug("Restore Job finished:" + result); LOG.debug("Restore Job finished:" + result);
} catch (Exception e) { } catch (Exception e) {
LOG.error(e.toString(), e); LOG.error(e.toString(), e);
throw new IOException("Can not restore from backup directory " + dirs throw new IOException(
+ " (check Hadoop and HBase logs) ", e); "Can not restore from backup directory " + dirs + " (check Hadoop and HBase logs) ", e);
} }
} }
} }
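For orientation, a small sketch of invoking the run() entry point shown above for a single table. The signature is taken from the hunk; the import location of RestoreJob is an assumption, and the final argument requests a full-backup restore as in the debug message above.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.RestoreJob;

public class RestoreDriverSketch {
  static void restoreOneTable(RestoreJob restoreJob, Path backupImageDir, String fromTable,
    String toTable) throws IOException {
    // One source directory, one source table, one target table, full restore.
    restoreJob.run(new Path[] { backupImageDir },
      new TableName[] { TableName.valueOf(fromTable) },
      new TableName[] { TableName.valueOf(toTable) }, true);
  }
}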

View File

@ -1,5 +1,4 @@
/** /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -43,6 +42,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
@ -62,8 +62,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
@Override @Override
public void init(Map<String, Object> params) { public void init(Map<String, Object> params) {
MasterServices master = (MasterServices) MapUtils.getObject(params, MasterServices master = (MasterServices) MapUtils.getObject(params, HMaster.MASTER);
HMaster.MASTER);
if (master != null) { if (master != null) {
conn = master.getConnection(); conn = master.getConnection();
if (getConf() == null) { if (getConf() == null) {
@ -79,7 +78,6 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
} }
} }
private Map<Address, Long> getServersToOldestBackupMapping(List<BackupInfo> backups) private Map<Address, Long> getServersToOldestBackupMapping(List<BackupInfo> backups)
throws IOException { throws IOException {
Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>(); Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>();
@ -136,8 +134,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath())); Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath()));
long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName()); long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName());
if (!addressToLastBackupMap.containsKey(walServerAddress) if (
|| addressToLastBackupMap.get(walServerAddress) >= walTimestamp) { !addressToLastBackupMap.containsKey(walServerAddress)
|| addressToLastBackupMap.get(walServerAddress) >= walTimestamp
) {
filteredFiles.add(file); filteredFiles.add(file);
} }
} catch (Exception ex) { } catch (Exception ex) {
@ -147,8 +147,8 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
} }
} }
LOG LOG.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files),
.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), filteredFiles.size()); filteredFiles.size());
return filteredFiles; return filteredFiles;
} }
@ -156,8 +156,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
public void setConf(Configuration config) { public void setConf(Configuration config) {
// If backup is disabled, keep all members null // If backup is disabled, keep all members null
super.setConf(config); super.setConf(config);
if (!config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, if (
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) { !config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
) {
LOG.warn("Backup is disabled - allowing all wals to be deleted"); LOG.warn("Backup is disabled - allowing all wals to be deleted");
} }
} }
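As a side note, a cleaner delegate like this only takes effect once it is registered on the master's log-cleaner chain with backups enabled. A hedged configuration sketch, assuming the usual HConstants plugin key and the package location of BackupLogCleaner (not shown in the hunks):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;

public class BackupCleanerConfigSketch {
  static Configuration backupAwareConf() {
    Configuration conf = HBaseConfiguration.create();
    // Without this flag the cleaner warns and lets every WAL be deleted (see setConf above).
    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    // Register the delegate; a real deployment would append to the existing plugin list
    // rather than replace it.
    conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, BackupLogCleaner.class.getName());
    return conf;
  }
}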

View File

@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -15,14 +15,12 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.master; package org.apache.hadoop.hbase.backup.master;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
@ -61,7 +59,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
public static final String BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis"; public static final String BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis";
public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis"; public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis";
public static final String BACKUP_POOL_THREAD_NUMBER_KEY = public static final String BACKUP_POOL_THREAD_NUMBER_KEY =
"hbase.backup.logroll.pool.thread.number"; "hbase.backup.logroll.pool.thread.number";
public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500; public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500;
public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000; public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000;
@ -82,26 +80,24 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
@Override @Override
public void initialize(MasterServices master, MetricsMaster metricsMaster) public void initialize(MasterServices master, MetricsMaster metricsMaster)
throws IOException, UnsupportedOperationException { throws IOException, UnsupportedOperationException {
this.master = master; this.master = master;
this.done = false; this.done = false;
// setup the default procedure coordinator // setup the default procedure coordinator
String name = master.getServerName().toString(); String name = master.getServerName().toString();
// get the configuration for the coordinator // get the configuration for the coordinator
Configuration conf = master.getConfiguration(); Configuration conf = master.getConfiguration();
long wakeFrequency = conf.getInt(BACKUP_WAKE_MILLIS_KEY, BACKUP_WAKE_MILLIS_DEFAULT); long wakeFrequency = conf.getInt(BACKUP_WAKE_MILLIS_KEY, BACKUP_WAKE_MILLIS_DEFAULT);
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY,BACKUP_TIMEOUT_MILLIS_DEFAULT); long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, BACKUP_POOL_THREAD_NUMBER_DEFAULT);
BACKUP_POOL_THREAD_NUMBER_DEFAULT);
// setup the default procedure coordinator // setup the default procedure coordinator
ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, opThreads); ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, opThreads);
ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(master); ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(master);
ProcedureCoordinatorRpcs comms = ProcedureCoordinatorRpcs comms =
coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name); coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name);
this.coordinator = new ProcedureCoordinator(comms, tpool, timeoutMillis, wakeFrequency); this.coordinator = new ProcedureCoordinator(comms, tpool, timeoutMillis, wakeFrequency);
} }
@ -115,7 +111,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
public void execProcedure(ProcedureDescription desc) throws IOException { public void execProcedure(ProcedureDescription desc) throws IOException {
if (!isBackupEnabled()) { if (!isBackupEnabled()) {
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
+ " setting"); + " setting");
return; return;
} }
this.done = false; this.done = false;
@ -149,12 +145,12 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
this.done = true; this.done = true;
} catch (InterruptedException e) { } catch (InterruptedException e) {
ForeignException ee = ForeignException ee =
new ForeignException("Interrupted while waiting for roll log procdure to finish", e); new ForeignException("Interrupted while waiting for roll log procdure to finish", e);
monitor.receive(ee); monitor.receive(ee);
Thread.currentThread().interrupt(); Thread.currentThread().interrupt();
} catch (ForeignException e) { } catch (ForeignException e) {
ForeignException ee = ForeignException ee =
new ForeignException("Exception while waiting for roll log procdure to finish", e); new ForeignException("Exception while waiting for roll log procdure to finish", e);
monitor.receive(ee); monitor.receive(ee);
} }
monitor.rethrowException(); monitor.rethrowException();
@ -162,7 +158,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
@Override @Override
public void checkPermissions(ProcedureDescription desc, AccessChecker accessChecker, User user) public void checkPermissions(ProcedureDescription desc, AccessChecker accessChecker, User user)
throws IOException { throws IOException {
// TODO: what permissions checks are needed here? // TODO: what permissions checks are needed here?
} }
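For reference, the wake, timeout and pool-size knobs read in initialize() above can be tuned through the public constants declared in this class; a minimal sketch with illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;

public class LogRollTuningSketch {
  static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Timeout and wake values match the defaults in the hunk above; the pool size is illustrative.
    conf.setLong(LogRollMasterProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, 180000L);
    conf.setInt(LogRollMasterProcedureManager.BACKUP_WAKE_MILLIS_KEY, 500);
    conf.setInt(LogRollMasterProcedureManager.BACKUP_POOL_THREAD_NUMBER_KEY, 8);
    return conf;
  }
}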

View File

@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.regionserver; package org.apache.hadoop.hbase.backup.regionserver;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
@ -50,10 +48,10 @@ public class LogRollBackupSubprocedure extends Subprocedure {
private String backupRoot; private String backupRoot;
public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member, public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member,
ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout,
LogRollBackupSubprocedurePool taskManager, byte[] data) { LogRollBackupSubprocedurePool taskManager, byte[] data) {
super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener, super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener,
wakeFrequency, timeout); wakeFrequency, timeout);
LOG.info("Constructing a LogRollBackupSubprocedure."); LOG.info("Constructing a LogRollBackupSubprocedure.");
this.rss = rss; this.rss = rss;
this.taskManager = taskManager; this.taskManager = taskManager;
@ -91,7 +89,7 @@ public class LogRollBackupSubprocedure extends Subprocedure {
} }
LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum
+ " highest: " + highest + " on " + rss.getServerName()); + " highest: " + highest + " on " + rss.getServerName());
((HRegionServer) rss).getWalRoller().requestRollAll(); ((HRegionServer) rss).getWalRoller().requestRollAll();
long start = EnvironmentEdgeManager.currentTime(); long start = EnvironmentEdgeManager.currentTime();
while (!((HRegionServer) rss).getWalRoller().walRollFinished()) { while (!((HRegionServer) rss).getWalRoller().walRollFinished()) {
@ -99,20 +97,20 @@ public class LogRollBackupSubprocedure extends Subprocedure {
} }
LOG.debug("log roll took " + (EnvironmentEdgeManager.currentTime() - start)); LOG.debug("log roll took " + (EnvironmentEdgeManager.currentTime() - start));
LOG.info("After roll log in backup subprocedure, current log number: " + fsWAL.getFilenum() LOG.info("After roll log in backup subprocedure, current log number: " + fsWAL.getFilenum()
+ " on " + rss.getServerName()); + " on " + rss.getServerName());
Connection connection = rss.getConnection(); Connection connection = rss.getConnection();
try (final BackupSystemTable table = new BackupSystemTable(connection)) { try (final BackupSystemTable table = new BackupSystemTable(connection)) {
// sanity check, good for testing // sanity check, good for testing
HashMap<String, Long> serverTimestampMap = HashMap<String, Long> serverTimestampMap =
table.readRegionServerLastLogRollResult(backupRoot); table.readRegionServerLastLogRollResult(backupRoot);
String host = rss.getServerName().getHostname(); String host = rss.getServerName().getHostname();
int port = rss.getServerName().getPort(); int port = rss.getServerName().getPort();
String server = host + ":" + port; String server = host + ":" + port;
Long sts = serverTimestampMap.get(host); Long sts = serverTimestampMap.get(host);
if (sts != null && sts > highest) { if (sts != null && sts > highest) {
LOG.warn("Won't update server's last roll log result: current=" + sts + " new=" LOG
+ highest); .warn("Won't update server's last roll log result: current=" + sts + " new=" + highest);
return null; return null;
} }
// write the log number to backup system table. // write the log number to backup system table.

View File

@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.regionserver; package org.apache.hadoop.hbase.backup.regionserver;
import java.io.Closeable; import java.io.Closeable;
@ -28,19 +27,18 @@ import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
/** /**
* Handle running each of the individual tasks for completing a backup procedure on a region * Handle running each of the individual tasks for completing a backup procedure on a region server.
* server.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class LogRollBackupSubprocedurePool implements Closeable, Abortable { public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
@ -58,9 +56,8 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
public LogRollBackupSubprocedurePool(String name, Configuration conf) { public LogRollBackupSubprocedurePool(String name, Configuration conf) {
// configure the executor service // configure the executor service
long keepAlive = long keepAlive = conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS); int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS);
this.name = name; this.name = name;
executor = executor =
@ -94,7 +91,7 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
} catch (InterruptedException e) { } catch (InterruptedException e) {
if (aborted) { if (aborted) {
throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!", throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!",
e); e);
} }
Thread.currentThread().interrupt(); Thread.currentThread().interrupt();
} catch (ExecutionException e) { } catch (ExecutionException e) {

View File

@ -1,13 +1,13 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file * regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at * with the License. You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.regionserver; package org.apache.hadoop.hbase.backup.regionserver;
import java.io.IOException; import java.io.IOException;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupManager;
@ -53,7 +51,7 @@ import org.slf4j.LoggerFactory;
@InterfaceAudience.Private @InterfaceAudience.Private
public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager { public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager {
private static final Logger LOG = private static final Logger LOG =
LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class); LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class);
/** Conf key for number of request threads to start backup on region servers */ /** Conf key for number of request threads to start backup on region servers */
public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads"; public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads";
@ -86,7 +84,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
public void start() { public void start() {
if (!BackupManager.isBackupEnabled(rss.getConfiguration())) { if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
+ " setting"); + " setting");
return; return;
} }
this.memberRpcs.start(rss.getServerName().toString(), member); this.memberRpcs.start(rss.getServerName().toString(), member);
@ -122,7 +120,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
// don't run a backup if the parent is stop(ping) // don't run a backup if the parent is stop(ping)
if (rss.isStopping() || rss.isStopped()) { if (rss.isStopping() || rss.isStopped()) {
throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName() throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName()
+ ", because stopping/stopped!"); + ", because stopping/stopped!");
} }
LOG.info("Attempting to run a roll log procedure for backup."); LOG.info("Attempting to run a roll log procedure for backup.");
@ -130,12 +128,12 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
Configuration conf = rss.getConfiguration(); Configuration conf = rss.getConfiguration();
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
long wakeMillis = long wakeMillis =
conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT); conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT);
LogRollBackupSubprocedurePool taskManager = LogRollBackupSubprocedurePool taskManager =
new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf);
return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis, return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis,
taskManager, data); taskManager, data);
} }
/** /**
@ -153,12 +151,12 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
this.rss = rss; this.rss = rss;
if (!BackupManager.isBackupEnabled(rss.getConfiguration())) { if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
+ " setting"); + " setting");
return; return;
} }
ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(rss); ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(rss);
this.memberRpcs = coordManager this.memberRpcs = coordManager
.getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
// read in the backup handler configuration properties // read in the backup handler configuration properties
Configuration conf = rss.getConfiguration(); Configuration conf = rss.getConfiguration();
@ -166,7 +164,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT); int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT);
// create the actual cohort member // create the actual cohort member
ThreadPoolExecutor pool = ThreadPoolExecutor pool =
ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder()); this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder());
} }

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.backup.util; package org.apache.hadoop.hbase.backup.util;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.util; package org.apache.hadoop.hbase.backup.util;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
@ -83,8 +82,8 @@ public final class BackupUtils {
* @param rsLogTimestampMap timestamp map * @param rsLogTimestampMap timestamp map
* @return the min timestamp of each RS * @return the min timestamp of each RS
*/ */
public static Map<String, Long> getRSLogTimestampMins( public static Map<String, Long>
Map<TableName, Map<String, Long>> rsLogTimestampMap) { getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) { if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
return null; return null;
} }
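To illustrate the reshaped helper above: it collapses a per-table, per-server timestamp map into the minimum timestamp seen for each region server. A small usage sketch with made-up server names and values:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class TimestampMinsSketch {
  public static void main(String[] args) {
    Map<TableName, Map<String, Long>> byTable = new HashMap<>();
    Map<String, Long> t1 = new HashMap<>();
    t1.put("rs1.example.com:16020", 100L);
    t1.put("rs2.example.com:16020", 250L);
    Map<String, Long> t2 = new HashMap<>();
    t2.put("rs1.example.com:16020", 90L);
    byTable.put(TableName.valueOf("t1"), t1);
    byTable.put(TableName.valueOf("t2"), t2);
    // Expected: rs1.example.com:16020 -> 90, rs2.example.com:16020 -> 250
    Map<String, Long> mins = BackupUtils.getRSLogTimestampMins(byTable);
    System.out.println(mins);
  }
}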
@ -114,13 +113,13 @@ public final class BackupUtils {
/** /**
* copy out Table RegionInfo into incremental backup image need to consider move this logic into * copy out Table RegionInfo into incremental backup image need to consider move this logic into
* HBackupFileSystem * HBackupFileSystem
* @param conn connection * @param conn connection
* @param backupInfo backup info * @param backupInfo backup info
* @param conf configuration * @param conf configuration
* @throws IOException exception * @throws IOException exception
*/ */
public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf) public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
throws IOException { throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf); Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf); FileSystem fs = rootDir.getFileSystem(conf);
@ -140,8 +139,8 @@ public final class BackupUtils {
FSTableDescriptors descriptors = FSTableDescriptors descriptors =
new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf)); new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
descriptors.createTableDescriptorForTableDirectory(target, orig, false); descriptors.createTableDescriptorForTableDirectory(target, orig, false);
LOG.debug("Attempting to copy table info for:" + table + " target: " + target + LOG.debug("Attempting to copy table info for:" + table + " target: " + target
" descriptor: " + orig); + " descriptor: " + orig);
LOG.debug("Finished copying tableinfo."); LOG.debug("Finished copying tableinfo.");
List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table); List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
// For each region, write the region info to disk // For each region, write the region info to disk
@ -161,7 +160,7 @@ public final class BackupUtils {
* Write the .regioninfo file on-disk. * Write the .regioninfo file on-disk.
*/ */
public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs, public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
final Path regionInfoDir, RegionInfo regionInfo) throws IOException { final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo); final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR); Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
// First check to get the permissions // First check to get the permissions
@ -219,7 +218,7 @@ public final class BackupUtils {
/** /**
* Get the total length of files under the given directory recursively. * Get the total length of files under the given directory recursively.
* @param fs The hadoop file system * @param fs The hadoop file system
* @param dir The target directory * @param dir The target directory
* @return the total length of files * @return the total length of files
* @throws IOException exception * @throws IOException exception
@ -241,13 +240,13 @@ public final class BackupUtils {
/** /**
* Get list of all old WAL files (WALs and archive) * Get list of all old WAL files (WALs and archive)
* @param c configuration * @param c configuration
* @param hostTimestampMap {host,timestamp} map * @param hostTimestampMap {host,timestamp} map
* @return list of WAL files * @return list of WAL files
* @throws IOException exception * @throws IOException exception
*/ */
public static List<String> getWALFilesOlderThan(final Configuration c, public static List<String> getWALFilesOlderThan(final Configuration c,
final HashMap<String, Long> hostTimestampMap) throws IOException { final HashMap<String, Long> hostTimestampMap) throws IOException {
Path walRootDir = CommonFSUtils.getWALRootDir(c); Path walRootDir = CommonFSUtils.getWALRootDir(c);
Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME); Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
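For context, a minimal sketch of how getWALFilesOlderThan might be driven, assuming BackupUtils sits in org.apache.hadoop.hbase.backup.util as shown above; the host key and timestamp are hypothetical sample values (real callers build the map from the backup system table):

import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class OldWalListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical {host, last-backup-timestamp} entries, mirroring the
    // {host,timestamp} map described in the javadoc above.
    HashMap<String, Long> hostTimestampMap = new HashMap<>();
    hostTimestampMap.put("rs1.example.com", 1396650096738L);
    // Scans both the WALs directory and the archive and returns the files
    // older than the per-host timestamps.
    List<String> oldWals = BackupUtils.getWALFilesOlderThan(conf, hostTimestampMap);
    oldWals.forEach(System.out::println);
  }
}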
@ -292,7 +291,7 @@ public final class BackupUtils {
/** /**
 * Check whether the backup path exists  * Check whether the backup path exists
* @param backupStr backup * @param backupStr backup
* @param conf configuration * @param conf configuration
* @return Yes if path exists * @return Yes if path exists
* @throws IOException exception * @throws IOException exception
*/ */
@ -313,7 +312,7 @@ public final class BackupUtils {
/** /**
 * Check the target path first; confirm it doesn't exist before the backup  * Check the target path first; confirm it doesn't exist before the backup
* @param backupRootPath backup destination path * @param backupRootPath backup destination path
* @param conf configuration * @param conf configuration
* @throws IOException exception * @throws IOException exception
*/ */
public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException { public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
@ -325,8 +324,7 @@ public final class BackupUtils {
String newMsg = null; String newMsg = null;
if (expMsg.contains("No FileSystem for scheme")) { if (expMsg.contains("No FileSystem for scheme")) {
newMsg = newMsg =
"Unsupported filesystem scheme found in the backup target url. Error Message: " "Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
+ expMsg;
LOG.error(newMsg); LOG.error(newMsg);
throw new IOException(newMsg); throw new IOException(newMsg);
} else { } else {
@ -390,7 +388,7 @@ public final class BackupUtils {
} }
public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files, public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
PathFilter filter) throws IOException { PathFilter filter) throws IOException {
RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true); RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
while (it.hasNext()) { while (it.hasNext()) {
@ -414,7 +412,7 @@ public final class BackupUtils {
/** /**
* Clean up directories which are generated when DistCp copying hlogs * Clean up directories which are generated when DistCp copying hlogs
* @param backupInfo backup info * @param backupInfo backup info
* @param conf configuration * @param conf configuration
* @throws IOException exception * @throws IOException exception
*/ */
private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException { private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
@ -449,9 +447,8 @@ public final class BackupUtils {
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
for (TableName table : backupInfo.getTables()) { for (TableName table : backupInfo.getTables()) {
Path targetDirPath = Path targetDirPath = new Path(
new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
table));
if (outputFs.delete(targetDirPath, true)) { if (outputFs.delete(targetDirPath, true)) {
LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
} else { } else {
@ -468,7 +465,7 @@ public final class BackupUtils {
outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true); outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
} catch (IOException e1) { } catch (IOException e1) {
LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at " LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + "."); + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
} }
} }
@ -477,15 +474,15 @@ public final class BackupUtils {
 * which is also where the backup manifest file is. The return value looks like:  * which is also where the backup manifest file is. The return value looks like:
* "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
* @param backupRootDir backup root directory * @param backupRootDir backup root directory
* @param backupId backup id * @param backupId backup id
* @param tableName table name * @param tableName table name
* @return backupPath String for the particular table * @return backupPath String for the particular table
*/ */
public static String getTableBackupDir(String backupRootDir, String backupId, public static String getTableBackupDir(String backupRootDir, String backupId,
TableName tableName) { TableName tableName) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
+ Path.SEPARATOR; + Path.SEPARATOR;
} }
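As a quick illustration of the path layout this method produces, a sketch using the sample root, backup id, and table from the javadoc above (TableName and BackupUtils imports assume the usual package locations):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class TableBackupDirSketch {
  public static void main(String[] args) {
    // Composes <rootDir>/<backupId>/<namespace>/<qualifier>/
    String dir = BackupUtils.getTableBackupDir(
      "hdfs://backup.hbase.org:9000/user/biadmin/backup1",
      "backup_1396650096738",
      TableName.valueOf("default", "t1_dn"));
    // Prints: hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/
    System.out.println(dir);
  }
}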
/** /**
@ -510,13 +507,13 @@ public final class BackupUtils {
 * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates  * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
 * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException but  * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException but
 * returns an empty FileStatus[], while Hadoop 2 will throw a FileNotFoundException.  * returns an empty FileStatus[], while Hadoop 2 will throw a FileNotFoundException.
* @param fs file system * @param fs file system
* @param dir directory * @param dir directory
* @param filter path filter * @param filter path filter
* @return null if dir is empty or doesn't exist, otherwise FileStatus array * @return null if dir is empty or doesn't exist, otherwise FileStatus array
*/ */
public static FileStatus[] listStatus(final FileSystem fs, final Path dir, public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
final PathFilter filter) throws IOException { final PathFilter filter) throws IOException {
FileStatus[] status = null; FileStatus[] status = null;
try { try {
status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter); status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
@ -535,8 +532,8 @@ public final class BackupUtils {
} }
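A short usage sketch of the listStatus wrapper defined above, showing the null check its javadoc asks for; the directory name is a hypothetical sample value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class ListStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/backupUT");  // hypothetical backup directory
    // Unlike a bare fs.listStatus, this returns null when the directory is
    // missing or empty, so callers must null-check before iterating.
    FileStatus[] children = BackupUtils.listStatus(fs, dir, null);
    if (children == null) {
      System.out.println("Directory is empty or does not exist: " + dir);
      return;
    }
    for (FileStatus st : children) {
      System.out.println(st.getPath());
    }
  }
}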
/** /**
* Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
* 'path' component of a Path's URI: e.g. If a Path is * component of a Path's URI: e.g. If a Path is
* <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
* <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
* out a Path without qualifying Filesystem instance. * out a Path without qualifying Filesystem instance.
@ -551,16 +548,16 @@ public final class BackupUtils {
* Given the backup root dir and the backup id, return the log file location for an incremental * Given the backup root dir and the backup id, return the log file location for an incremental
* backup. * backup.
* @param backupRootDir backup root directory * @param backupRootDir backup root directory
* @param backupId backup id * @param backupId backup id
 * @return logBackupDir: ".../user/biadmin/backup1/backup_1396650096738/WALs"  * @return logBackupDir: ".../user/biadmin/backup1/backup_1396650096738/WALs"
*/ */
public static String getLogBackupDir(String backupRootDir, String backupId) { public static String getLogBackupDir(String backupRootDir, String backupId) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ HConstants.HREGION_LOGDIR_NAME; + HConstants.HREGION_LOGDIR_NAME;
} }
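A sketch of the log-backup directory this helper resolves for an incremental image, using the sample root and backup id from the javadoc examples above:

import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class LogBackupDirSketch {
  public static void main(String[] args) {
    // Composes <backupRootDir>/<backupId>/WALs (HConstants.HREGION_LOGDIR_NAME)
    String logBackupDir = BackupUtils.getLogBackupDir(
      "hdfs://backup.hbase.org:9000/user/biadmin/backup1", "backup_1396650096738");
    // Prints: hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/WALs
    System.out.println(logBackupDir);
  }
}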
private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath) private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
throws IOException { throws IOException {
// Get all (n) history from backup root destination // Get all (n) history from backup root destination
FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf); FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
@ -605,7 +602,7 @@ public final class BackupUtils {
} }
public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath, public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
BackupInfo.Filter... filters) throws IOException { BackupInfo.Filter... filters) throws IOException {
List<BackupInfo> infos = getHistory(conf, backupRootPath); List<BackupInfo> infos = getHistory(conf, backupRootPath);
List<BackupInfo> ret = new ArrayList<>(); List<BackupInfo> ret = new ArrayList<>();
for (BackupInfo info : infos) { for (BackupInfo info : infos) {
@ -627,7 +624,7 @@ public final class BackupUtils {
} }
public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs) public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
throws IOException { throws IOException {
Path backupPath = new Path(backupRootPath, backupId); Path backupPath = new Path(backupRootPath, backupId);
RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true); RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
@ -646,24 +643,24 @@ public final class BackupUtils {
/** /**
* Create restore request. * Create restore request.
* @param backupRootDir backup root dir * @param backupRootDir backup root dir
* @param backupId backup id * @param backupId backup id
* @param check check only * @param check check only
* @param fromTables table list from * @param fromTables table list from
* @param toTables table list to * @param toTables table list to
* @param isOverwrite overwrite data * @param isOverwrite overwrite data
 * @return request object  * @return request object
*/ */
public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId, public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) { boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
RestoreRequest.Builder builder = new RestoreRequest.Builder(); RestoreRequest.Builder builder = new RestoreRequest.Builder();
RestoreRequest request = RestoreRequest request =
builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check) builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check)
.withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build(); .withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build();
return request; return request;
} }
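A minimal sketch of building a restore request through this factory method, assuming RestoreRequest lives in org.apache.hadoop.hbase.backup; the table names and paths are sample values:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.RestoreRequest;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class RestoreRequestSketch {
  public static void main(String[] args) {
    TableName[] fromTables = { TableName.valueOf("default", "t1_dn") };
    TableName[] toTables = { TableName.valueOf("default", "t1_dn_restored") };
    // check=false asks for a real restore; check=true is the "check only" mode
    // described in the javadoc above. The last flag overwrites existing data.
    RestoreRequest request = BackupUtils.createRestoreRequest(
      "hdfs://backup.hbase.org:9000/user/biadmin/backup1", "backup_1396650096738",
      false, fromTables, toTables, true);
    System.out.println(request);
  }
}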
public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap, public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap,
Configuration conf) throws IOException { Configuration conf) throws IOException {
boolean isValid = true; boolean isValid = true;
for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) { for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
@ -678,7 +675,7 @@ public final class BackupUtils {
LOG.info("Dependent image(s) from old to new:"); LOG.info("Dependent image(s) from old to new:");
for (BackupImage image : imageSet) { for (BackupImage image : imageSet) {
String imageDir = String imageDir =
HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
if (!BackupUtils.checkPathExist(imageDir, conf)) { if (!BackupUtils.checkPathExist(imageDir, conf)) {
LOG.error("ERROR: backup image does not exist: " + imageDir); LOG.error("ERROR: backup image does not exist: " + imageDir);
isValid = false; isValid = false;
@ -691,13 +688,12 @@ public final class BackupUtils {
} }
public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit) public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
throws IOException { throws IOException {
FileSystem fs = FileSystem.get(conf); FileSystem fs = FileSystem.get(conf);
String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, String tmp =
fs.getHomeDirectory() + "/hbase-staging"); conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
Path path = Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-" + EnvironmentEdgeManager.currentTime());
+ EnvironmentEdgeManager.currentTime());
if (deleteOnExit) { if (deleteOnExit) {
fs.deleteOnExit(path); fs.deleteOnExit(path);
} }
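A sketch of resolving the bulk-load staging directory via getBulkOutputDir; the table name is a sample value, and the temp root comes from HConstants.TEMPORARY_FS_DIRECTORY_KEY (or <home>/hbase-staging) as the code above shows:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

public class BulkOutputDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // deleteOnExit=true registers the directory for cleanup when the JVM exits.
    Path bulkOut = BackupUtils.getBulkOutputDir("t1_dn", conf, true);
    // Something like <tmp>/bulk_output-t1_dn-<current time>
    System.out.println(bulkOut);
  }
}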

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hbase.backup.util; package org.apache.hadoop.hbase.backup.util;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
@ -75,7 +74,7 @@ public class RestoreTool {
private final HashMap<TableName, Path> snapshotMap = new HashMap<>(); private final HashMap<TableName, Path> snapshotMap = new HashMap<>();
public RestoreTool(Configuration conf, final Path backupRootPath, final String backupId) public RestoreTool(Configuration conf, final Path backupRootPath, final String backupId)
throws IOException { throws IOException {
this.conf = conf; this.conf = conf;
this.backupRootPath = backupRootPath; this.backupRootPath = backupRootPath;
this.backupId = backupId; this.backupId = backupId;
@ -91,8 +90,8 @@ public class RestoreTool {
*/ */
Path getTableArchivePath(TableName tableName) throws IOException { Path getTableArchivePath(TableName tableName) throws IOException {
Path baseDir = Path baseDir =
new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
HConstants.HFILE_ARCHIVE_DIRECTORY); HConstants.HFILE_ARCHIVE_DIRECTORY);
Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString()); Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString());
@ -142,16 +141,16 @@ public class RestoreTool {
 * During an incremental backup operation, call WalPlayer to replay the WALs in the backup image.  * During an incremental backup operation, call WalPlayer to replay the WALs in the backup image.
 * Currently tableNames and newTableNames only contain a single table; this will be expanded to  * Currently tableNames and newTableNames only contain a single table; this will be expanded to
 * multiple tables in the future  * multiple tables in the future
* @param conn HBase connection * @param conn HBase connection
* @param tableBackupPath backup path * @param tableBackupPath backup path
* @param logDirs : incremental backup folders, which contains WAL * @param logDirs : incremental backup folders, which contains WAL
 * @param logDirs : incremental backup folders, which contains WAL  * @param tableNames : source tableNames (table names that were backed up)
 * @param newTableNames : target tableNames (table names to be restored to)  * @param newTableNames : target tableNames (table names to be restored to)
* @param incrBackupId incremental backup Id * @param incrBackupId incremental backup Id
* @throws IOException exception * @throws IOException exception
*/ */
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs, public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException { TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
try (Admin admin = conn.getAdmin()) { try (Admin admin = conn.getAdmin()) {
if (tableNames.length != newTableNames.length) { if (tableNames.length != newTableNames.length) {
throw new IOException("Number of source tables and target tables does not match!"); throw new IOException("Number of source tables and target tables does not match!");
@ -163,7 +162,7 @@ public class RestoreTool {
for (TableName tableName : newTableNames) { for (TableName tableName : newTableNames) {
if (!admin.tableExists(tableName)) { if (!admin.tableExists(tableName)) {
throw new IOException("HBase table " + tableName throw new IOException("HBase table " + tableName
+ " does not exist. Create the table first, e.g. by restoring a full backup."); + " does not exist. Create the table first, e.g. by restoring a full backup.");
} }
} }
// adjust table schema // adjust table schema
@ -179,7 +178,7 @@ public class RestoreTool {
TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName); TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies()); List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
List<ColumnFamilyDescriptor> existingFamilies = List<ColumnFamilyDescriptor> existingFamilies =
Arrays.asList(newTableDescriptor.getColumnFamilies()); Arrays.asList(newTableDescriptor.getColumnFamilies());
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
boolean schemaChangeNeeded = false; boolean schemaChangeNeeded = false;
for (ColumnFamilyDescriptor family : families) { for (ColumnFamilyDescriptor family : families) {
@ -206,8 +205,7 @@ public class RestoreTool {
} }
public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName, public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName,
TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
throws IOException {
createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists, createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists,
lastIncrBackupId); lastIncrBackupId);
} }
@ -216,21 +214,20 @@ public class RestoreTool {
 * Returns the path to the backup table snapshot directory:  * Returns the path to the backup table snapshot directory:
* "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot" * "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot"
* @param backupRootPath backup root path * @param backupRootPath backup root path
* @param tableName table name * @param tableName table name
* @param backupId backup Id * @param backupId backup Id
* @return path for snapshot * @return path for snapshot
*/ */
Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) { Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) {
return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
HConstants.SNAPSHOT_DIR_NAME); HConstants.SNAPSHOT_DIR_NAME);
} }
/** /**
 * Returns the path for:  * Returns the path for:
* ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/ * ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/
* snapshot_1396650097621_namespace_table" * snapshot_1396650097621_namespace_table" this path contains .snapshotinfo, .tabledesc (0.96 and
* this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo, * 0.98) this path contains .snapshotinfo, .data.manifest (trunk)
* .data.manifest (trunk)
* @param tableName table name * @param tableName table name
* @return path to table info * @return path to table info
* @throws IOException exception * @throws IOException exception
@ -241,7 +238,7 @@ public class RestoreTool {
// can't build the path directly as the timestamp values are different // can't build the path directly as the timestamp values are different
FileStatus[] snapshots = fs.listStatus(tableSnapShotPath, FileStatus[] snapshots = fs.listStatus(tableSnapShotPath,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
for (FileStatus snapshot : snapshots) { for (FileStatus snapshot : snapshots) {
tableInfoPath = snapshot.getPath(); tableInfoPath = snapshot.getPath();
// SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
@ -264,28 +261,27 @@ public class RestoreTool {
TableDescriptor tableDescriptor = manifest.getTableDescriptor(); TableDescriptor tableDescriptor = manifest.getTableDescriptor();
if (!tableDescriptor.getTableName().equals(tableName)) { if (!tableDescriptor.getTableName().equals(tableName)) {
LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+ tableInfoPath.toString()); + tableInfoPath.toString());
LOG.error("tableDescriptor.getNameAsString() = " LOG.error(
+ tableDescriptor.getTableName().getNameAsString()); "tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
+ " under tableInfoPath: " + tableInfoPath.toString()); + " under tableInfoPath: " + tableInfoPath.toString());
} }
return tableDescriptor; return tableDescriptor;
} }
private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName, private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
String lastIncrBackupId) throws IOException { String lastIncrBackupId) throws IOException {
if (lastIncrBackupId != null) { if (lastIncrBackupId != null) {
String target = String target =
BackupUtils.getTableBackupDir(backupRootPath.toString(), BackupUtils.getTableBackupDir(backupRootPath.toString(), lastIncrBackupId, tableName);
lastIncrBackupId, tableName);
return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target)); return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target));
} }
return null; return null;
} }
private void createAndRestoreTable(Connection conn, TableName tableName, TableName newTableName, private void createAndRestoreTable(Connection conn, TableName tableName, TableName newTableName,
Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException { Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
if (newTableName == null) { if (newTableName == null) {
newTableName = tableName; newTableName = tableName;
} }
@ -304,7 +300,7 @@ public class RestoreTool {
// check whether snapshot dir already recorded for target table // check whether snapshot dir already recorded for target table
if (snapshotMap.get(tableName) != null) { if (snapshotMap.get(tableName) != null) {
SnapshotDescription desc = SnapshotDescription desc =
SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath); SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc); SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
tableDescriptor = manifest.getTableDescriptor(); tableDescriptor = manifest.getTableDescriptor();
} else { } else {
@ -315,8 +311,8 @@ public class RestoreTool {
LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost"); LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
} }
} else { } else {
throw new IOException("Table snapshot directory: " + throw new IOException(
tableSnapshotPath + " does not exist."); "Table snapshot directory: " + tableSnapshotPath + " does not exist.");
} }
} }
@ -326,15 +322,15 @@ public class RestoreTool {
// find table descriptor but no archive dir means the table is empty, create table and exit // find table descriptor but no archive dir means the table is empty, create table and exit
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("find table descriptor but no archive dir for table " + tableName LOG.debug("find table descriptor but no archive dir for table " + tableName
+ ", will only create table"); + ", will only create table");
} }
tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor); tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor, checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
truncateIfExists); truncateIfExists);
return; return;
} else { } else {
throw new IllegalStateException("Cannot restore hbase table because directory '" throw new IllegalStateException(
+ " tableArchivePath is null."); "Cannot restore hbase table because directory '" + " tableArchivePath is null.");
} }
} }
@ -356,7 +352,8 @@ public class RestoreTool {
RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf); RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
Path[] paths = new Path[regionPathList.size()]; Path[] paths = new Path[regionPathList.size()];
regionPathList.toArray(paths); regionPathList.toArray(paths);
restoreService.run(paths, new TableName[]{tableName}, new TableName[] {newTableName}, true); restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName },
true);
} catch (Exception e) { } catch (Exception e) {
LOG.error(e.toString(), e); LOG.error(e.toString(), e);
@ -430,9 +427,11 @@ public class RestoreTool {
// start to parse hfile inside one family dir // start to parse hfile inside one family dir
Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
for (Path hfile : hfiles) { for (Path hfile : hfiles) {
if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".") if (
hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
|| StoreFileInfo.isReference(hfile.getName()) || StoreFileInfo.isReference(hfile.getName())
|| HFileLink.isHFileLink(hfile.getName())) { || HFileLink.isHFileLink(hfile.getName())
) {
continue; continue;
} }
HFile.Reader reader = HFile.createReader(fs, hfile, conf); HFile.Reader reader = HFile.createReader(fs, hfile, conf);
@ -441,7 +440,7 @@ public class RestoreTool {
first = reader.getFirstRowKey().get(); first = reader.getFirstRowKey().get();
last = reader.getLastRowKey().get(); last = reader.getLastRowKey().get();
LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
+ Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
// To eventually infer start key-end key boundaries // To eventually infer start key-end key boundaries
Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0; Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
@ -460,24 +459,24 @@ public class RestoreTool {
/** /**
* Prepare the table for bulkload, most codes copied from {@code createTable} method in * Prepare the table for bulkload, most codes copied from {@code createTable} method in
* {@code BulkLoadHFilesTool}. * {@code BulkLoadHFilesTool}.
* @param conn connection * @param conn connection
* @param tableBackupPath path * @param tableBackupPath path
* @param tableName table name * @param tableName table name
* @param targetTableName target table name * @param targetTableName target table name
* @param regionDirList region directory list * @param regionDirList region directory list
* @param htd table descriptor * @param htd table descriptor
* @param truncateIfExists truncates table if exists * @param truncateIfExists truncates table if exists
* @throws IOException exception * @throws IOException exception
*/ */
private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName, private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd, TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
boolean truncateIfExists) throws IOException { boolean truncateIfExists) throws IOException {
try (Admin admin = conn.getAdmin()) { try (Admin admin = conn.getAdmin()) {
boolean createNew = false; boolean createNew = false;
if (admin.tableExists(targetTableName)) { if (admin.tableExists(targetTableName)) {
if (truncateIfExists) { if (truncateIfExists) {
LOG.info("Truncating exising target table '" + targetTableName LOG.info(
+ "', preserving region splits"); "Truncating exising target table '" + targetTableName + "', preserving region splits");
admin.disableTable(targetTableName); admin.disableTable(targetTableName);
admin.truncateTable(targetTableName, true); admin.truncateTable(targetTableName, true);
} else { } else {
@ -497,7 +496,7 @@ public class RestoreTool {
// create table using table descriptor and region boundaries // create table using table descriptor and region boundaries
admin.createTable(htd, keys); admin.createTable(htd, keys);
} }
} catch (NamespaceNotFoundException e){ } catch (NamespaceNotFoundException e) {
LOG.warn("There was no namespace and the same will be created"); LOG.warn("There was no namespace and the same will be created");
String namespaceAsString = targetTableName.getNamespaceAsString(); String namespaceAsString = targetTableName.getNamespaceAsString();
LOG.info("Creating target namespace '" + namespaceAsString + "'"); LOG.info("Creating target namespace '" + namespaceAsString + "'");
@ -519,7 +518,7 @@ public class RestoreTool {
} }
if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) { if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) {
throw new IOException("Time out " + TABLE_AVAILABILITY_WAIT_TIME + "ms expired, table " throw new IOException("Time out " + TABLE_AVAILABILITY_WAIT_TIME + "ms expired, table "
+ targetTableName + " is still not available"); + targetTableName + " is still not available");
} }
} }
} }

View File

@ -1,5 +1,4 @@
/* /*
*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -111,8 +110,8 @@ public class TestBackupBase {
public IncrementalTableBackupClientForTest() { public IncrementalTableBackupClientForTest() {
} }
public IncrementalTableBackupClientForTest(Connection conn, public IncrementalTableBackupClientForTest(Connection conn, String backupId,
String backupId, BackupRequest request) throws IOException { BackupRequest request) throws IOException {
super(conn, backupId, request); super(conn, backupId, request);
} }
@ -127,13 +126,13 @@ public class TestBackupBase {
failStageIf(Stage.stage_1); failStageIf(Stage.stage_1);
backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL); backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
LOG.debug("For incremental backup, current table set is " LOG.debug("For incremental backup, current table set is "
+ backupManager.getIncrementalBackupTableSet()); + backupManager.getIncrementalBackupTableSet());
newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap(); newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
// copy out the table and region info files for each table // copy out the table and region info files for each table
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf); BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
convertWALsToHFiles(); convertWALsToHFiles();
incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
backupInfo.getBackupRootDir()); backupInfo.getBackupRootDir());
failStageIf(Stage.stage_2); failStageIf(Stage.stage_2);
@ -142,7 +141,7 @@ public class TestBackupBase {
// After this checkpoint, even if entering cancel process, will let the backup finished // After this checkpoint, even if entering cancel process, will let the backup finished
// Set the previousTimestampMap which is before this current log roll to the manifest. // Set the previousTimestampMap which is before this current log roll to the manifest.
Map<TableName, Map<String, Long>> previousTimestampMap = Map<TableName, Map<String, Long>> previousTimestampMap =
backupManager.readLogTimestampMap(); backupManager.readLogTimestampMap();
backupInfo.setIncrTimestampMap(previousTimestampMap); backupInfo.setIncrTimestampMap(previousTimestampMap);
// The table list in backupInfo is good for both full backup and incremental backup. // The table list in backupInfo is good for both full backup and incremental backup.
@ -151,10 +150,10 @@ public class TestBackupBase {
failStageIf(Stage.stage_3); failStageIf(Stage.stage_3);
Map<TableName, Map<String, Long>> newTableSetTimestampMap = Map<TableName, Map<String, Long>> newTableSetTimestampMap =
backupManager.readLogTimestampMap(); backupManager.readLogTimestampMap();
Long newStartCode = Long newStartCode =
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode); backupManager.writeBackupStartCode(newStartCode);
handleBulkLoad(backupInfo.getTableNames()); handleBulkLoad(backupInfo.getTableNames());
@ -176,7 +175,7 @@ public class TestBackupBase {
} }
public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request) public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request)
throws IOException { throws IOException {
super(conn, backupId, request); super(conn, backupId, request);
} }
@ -215,9 +214,8 @@ public class TestBackupBase {
// SNAPSHOT_TABLES: // SNAPSHOT_TABLES:
backupInfo.setPhase(BackupPhase.SNAPSHOT); backupInfo.setPhase(BackupPhase.SNAPSHOT);
for (TableName tableName : tableList) { for (TableName tableName : tableList) {
String snapshotName = String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
"snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
+ tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
snapshotTable(admin, tableName, snapshotName); snapshotTable(admin, tableName, snapshotName);
backupInfo.setSnapshotName(tableName, snapshotName); backupInfo.setSnapshotName(tableName, snapshotName);
@ -239,11 +237,10 @@ public class TestBackupBase {
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps); backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
Map<TableName, Map<String, Long>> newTableSetTimestampMap = Map<TableName, Map<String, Long>> newTableSetTimestampMap =
backupManager.readLogTimestampMap(); backupManager.readLogTimestampMap();
Long newStartCode = Long newStartCode =
BackupUtils.getMinValue(BackupUtils BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode); backupManager.writeBackupStartCode(newStartCode);
failStageIf(Stage.stage_4); failStageIf(Stage.stage_4);
// backup complete // backup complete
@ -251,7 +248,7 @@ public class TestBackupBase {
} catch (Exception e) { } catch (Exception e) {
if(autoRestoreOnFailure) { if (autoRestoreOnFailure) {
failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ", failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
BackupType.FULL, conf); BackupType.FULL, conf);
} }
@ -261,13 +258,13 @@ public class TestBackupBase {
} }
public static void setUpHelper() throws Exception { public static void setUpHelper() throws Exception {
BACKUP_ROOT_DIR = Path.SEPARATOR +"backupUT"; BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT"; BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
if (secure) { if (secure) {
// set the always on security provider // set the always on security provider
UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(), UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
HadoopSecurityEnabledUserProviderForTesting.class); HadoopSecurityEnabledUserProviderForTesting.class);
// setup configuration // setup configuration
SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
} }
@ -299,23 +296,21 @@ public class TestBackupBase {
TEST_UTIL.startMiniMapReduceCluster(); TEST_UTIL.startMiniMapReduceCluster();
BACKUP_ROOT_DIR = BACKUP_ROOT_DIR =
new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR)
BACKUP_ROOT_DIR).toString(); .toString();
LOG.info("ROOTDIR " + BACKUP_ROOT_DIR); LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
if (useSecondCluster) { if (useSecondCluster) {
BACKUP_REMOTE_ROOT_DIR = BACKUP_REMOTE_ROOT_DIR = new Path(
new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) + BACKUP_REMOTE_ROOT_DIR)
+ BACKUP_REMOTE_ROOT_DIR).toString(); .toString();
LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR); LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
} }
createTables(); createTables();
populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1); populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
} }
/** /**
* Setup Cluster with appropriate configurations before running tests. * Setup Cluster with appropriate configurations before running tests.
*
* @throws Exception if starting the mini cluster or setting up the tables fails * @throws Exception if starting the mini cluster or setting up the tables fails
*/ */
@BeforeClass @BeforeClass
@ -327,7 +322,6 @@ public class TestBackupBase {
setUpHelper(); setUpHelper();
} }
private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) { private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
Iterator<Entry<String, String>> it = masterConf.iterator(); Iterator<Entry<String, String>> it = masterConf.iterator();
while (it.hasNext()) { while (it.hasNext()) {
@ -341,7 +335,7 @@ public class TestBackupBase {
*/ */
@AfterClass @AfterClass
public static void tearDown() throws Exception { public static void tearDown() throws Exception {
try{ try {
SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin()); SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
} catch (Exception e) { } catch (Exception e) {
} }
@ -356,7 +350,7 @@ public class TestBackupBase {
} }
Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows) Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
throws IOException { throws IOException {
Table t = conn.getTable(table); Table t = conn.getTable(table);
Put p1; Put p1;
for (int i = 0; i < numRows; i++) { for (int i = 0; i < numRows; i++) {
@ -367,17 +361,16 @@ public class TestBackupBase {
return t; return t;
} }
protected BackupRequest createBackupRequest(BackupType type, protected BackupRequest createBackupRequest(BackupType type, List<TableName> tables,
List<TableName> tables, String path) { String path) {
BackupRequest.Builder builder = new BackupRequest.Builder(); BackupRequest.Builder builder = new BackupRequest.Builder();
BackupRequest request = builder.withBackupType(type) BackupRequest request =
.withTableList(tables) builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build();
.withTargetRootDir(path).build();
return request; return request;
} }
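The helper above is a thin wrapper over BackupRequest.Builder; a standalone sketch of the same chain, assuming BackupRequest and BackupType live in org.apache.hadoop.hbase.backup:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;

public class BackupRequestSketch {
  public static void main(String[] args) {
    List<TableName> tables = Collections.singletonList(TableName.valueOf("default", "t1_dn"));
    // Same builder chain used by createBackupRequest: type + table list + target root dir.
    BackupRequest request = new BackupRequest.Builder()
      .withBackupType(BackupType.FULL)
      .withTableList(tables)
      .withTargetRootDir("/backupUT")  // sample target root, mirroring BACKUP_ROOT_DIR above
      .build();
    System.out.println(request);
  }
}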
protected String backupTables(BackupType type, List<TableName> tables, String path) protected String backupTables(BackupType type, List<TableName> tables, String path)
throws IOException { throws IOException {
Connection conn = null; Connection conn = null;
BackupAdmin badmin = null; BackupAdmin badmin = null;
String backupId; String backupId;

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -35,13 +35,12 @@ public class TestBackupBoundaryTests extends TestBackupBase {
@ClassRule @ClassRule
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestBackupBoundaryTests.class); HBaseClassTestRule.forClass(TestBackupBoundaryTests.class);
private static final Logger LOG = LoggerFactory.getLogger(TestBackupBoundaryTests.class); private static final Logger LOG = LoggerFactory.getLogger(TestBackupBoundaryTests.class);
/** /**
* Verify that full backup is created on a single empty table correctly. * Verify that full backup is created on a single empty table correctly.
*
* @throws Exception if doing the full backup fails * @throws Exception if doing the full backup fails
*/ */
@Test @Test
@ -53,7 +52,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/** /**
* Verify that full backup is created on multiple empty tables correctly. * Verify that full backup is created on multiple empty tables correctly.
*
* @throws Exception if doing the full backup fails * @throws Exception if doing the full backup fails
*/ */
@Test @Test
@ -66,7 +64,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/** /**
* Verify that full backup fails on a single table that does not exist. * Verify that full backup fails on a single table that does not exist.
*
* @throws Exception if doing the full backup fails * @throws Exception if doing the full backup fails
*/ */
@Test(expected = IOException.class) @Test(expected = IOException.class)
@ -78,7 +75,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/** /**
* Verify that full backup fails on multiple tables that do not exist. * Verify that full backup fails on multiple tables that do not exist.
*
* @throws Exception if doing the full backup fails * @throws Exception if doing the full backup fails
*/ */
@Test(expected = IOException.class) @Test(expected = IOException.class)
@ -90,7 +86,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {
/** /**
* Verify that full backup fails on tableset containing real and fake tables. * Verify that full backup fails on tableset containing real and fake tables.
*
* @throws Exception if doing the full backup fails * @throws Exception if doing the full backup fails
*/ */
@Test(expected = IOException.class) @Test(expected = IOException.class)

Some files were not shown because too many files have changed in this diff.