parent 0edecbf9e0
commit 9c8c9e7fbf
@@ -106,5 +106,3 @@ else
echo "No command specified" >&2
exit 1
fi

@@ -33,4 +33,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:
export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}"
ulimit -n
@@ -1,4 +1,4 @@
<?xml version="1.0"?>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**

@@ -21,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the client. This tests the hbase-client package and all of the client
* tests in hbase-server.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to coprocessors.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as failing commonly on public build infrastructure.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and
* the like.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,23 +15,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as 'integration/system' test, meaning that the test class has the following
* characteristics:
* <ul>
* <li> Possibly takes hours to complete</li>
* <li> Can be run on a mini cluster or an actual cluster</li>
* <li> Can make changes to the given cluster (starting stopping daemons, etc)</li>
* <li> Should not be run in parallel of other integration tests</li>
* <li>Possibly takes hours to complete</li>
* <li>Can be run on a mini cluster or an actual cluster</li>
* <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
* <li>Should not be run in parallel of other integration tests</li>
* </ul>
*
* Integration / System tests should have a class name starting with "IntegrationTest", and
* should be annotated with @Category(IntegrationTests.class). Integration tests can be run
* using the IntegrationTestsDriver class or from mvn verify.
*
* Integration / System tests should have a class name starting with "IntegrationTest", and should
* be annotated with @Category(IntegrationTests.class). Integration tests can be run using the
* IntegrationTestsDriver class or from mvn verify.
* @see SmallTests
* @see MediumTests
* @see LargeTests
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -15,21 +15,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tagging a test as 'large', means that the test class has the following characteristics:
* <ul>
* <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
* same machine simultaneously so be careful two concurrent tests end up fighting over ports
* or other singular resources).</li>
* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it
* has, will run in last less than three minutes</li>
* <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
* if you need to run tests longer than this.</li>
* <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
* other singular resources).</li>
* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it has,
* will run in last less than three minutes</li>
* <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
* if you need to run tests longer than this.</li>
* </ul>
*
* @see SmallTests
* @see MediumTests
* @see IntegrationTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to mapred or mapreduce.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the master.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,21 +15,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tagging a test as 'medium' means that the test class has the following characteristics:
* <ul>
* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on
* the same machine simultaneously so be careful two concurrent tests end up fighting over ports
* or other singular resources).</li>
* <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
* has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on the
* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
* other singular resources).</li>
* <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
* has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
* </ul>
*
* Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster.
*
* Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster.
* @see SmallTests
* @see LargeTests
* @see IntegrationTests

@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as not easily falling into any of the below categories.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to RPC.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the regionserver.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to replication.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the REST capability of HBase.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to security.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.testclassification;
/**
* Tagging a test as 'small' means that the test class has the following characteristics:
* <ul>
* <li>it can be run simultaneously with other small tests all in the same JVM</li>
* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test
* methods it has, should take less than 15 seconds to complete</li>
* <li>it does not use a cluster</li>
* <li>it can be run simultaneously with other small tests all in the same JVM</li>
* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test methods
* it has, should take less than 15 seconds to complete</li>
* <li>it does not use a cluster</li>
* </ul>
*
* @see MediumTests
* @see LargeTests
* @see IntegrationTests
*/
public interface SmallTests {}
public interface SmallTests {
}
@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build
* Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build
* infrastructure.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as region tests which takes longer than 5 minutes to run on public build
* infrastructure.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests

@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.testclassification;
/**
@@ -1,6 +1,5 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one

@@ -23,8 +22,8 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>

@@ -58,10 +57,10 @@
further using xml-maven-plugin for xslt transformation, below. -->
<execution>
<id>hbase-client__copy-src-to-build-archetype-subdir</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir}</outputDirectory>
<resources>
@@ -76,29 +75,30 @@
</execution>
<execution>
<id>hbase-client__copy-pom-to-temp-for-xslt-processing</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir}</outputDirectory>
<resources>
<resource>
<directory>/${project.basedir}/../${hbase-client.dir}</directory>
<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
<filtering>true</filtering>
<!-- filtering replaces ${project.version} with literal -->
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resource>
</resources>
</configuration>
</execution>
<execution>
<id>hbase-shaded-client__copy-src-to-build-archetype-subdir</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir}</outputDirectory>
<resources>

@@ -113,20 +113,21 @@
</execution>
<execution>
<id>hbase-shaded-client__copy-pom-to-temp-for-xslt-processing</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir}</outputDirectory>
<resources>
<resource>
<directory>/${project.basedir}/../${hbase-shaded-client.dir}</directory>
<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
<filtering>true</filtering>
<!-- filtering replaces ${project.version} with literal -->
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resource>
</resources>
</configuration>
</execution>
@@ -137,10 +138,10 @@
using xml-maven-plugin for xslt transformation, below. -->
<execution>
<id>hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir}</outputDirectory>
<resources>

@@ -149,16 +150,16 @@
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resource>
</resources>
</configuration>
</execution>
<execution>
<id>hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir}</outputDirectory>
<resources>

@@ -167,7 +168,7 @@
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resource>
</resources>
</configuration>
</execution>

@@ -182,10 +183,10 @@
<!-- xml-maven-plugin modifies each exemplar project's pom.xml file to convert to standalone project. -->
<execution>
<id>modify-exemplar-pom-files-via-xslt</id>
<phase>process-resources</phase>
<goals>
<goal>transform</goal>
</goals>
<phase>process-resources</phase>
<configuration>
<transformationSets>
<transformationSet>

@@ -212,10 +213,10 @@
prevent warnings when project is generated from archetype. -->
<execution>
<id>modify-archetype-pom-files-via-xslt</id>
<phase>package</phase>
<goals>
<goal>transform</goal>
</goals>
<phase>package</phase>
<configuration>
<transformationSets>
<transformationSet>
@@ -242,32 +243,32 @@
</plugin>
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<!-- exec-maven-plugin executes chmod to make scripts executable -->
<execution>
<id>make-scripts-executable</id>
<phase>process-resources</phase>
<goals>
<goal>run</goal>
</goals>
<phase>process-resources</phase>
<configuration>
<chmod file="${project.basedir}/createArchetypes.sh" perm="+x" />
<chmod file="${project.basedir}/installArchetypes.sh" perm="+x" />
<chmod file="${project.basedir}/createArchetypes.sh" perm="+x"/>
<chmod file="${project.basedir}/installArchetypes.sh" perm="+x"/>
</configuration>
</execution>
<!-- exec-maven-plugin executes script which invokes 'archetype:create-from-project'
to derive archetypes from exemplar projects. -->
<execution>
<id>run-createArchetypes-script</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<phase>compile</phase>
<configuration>
<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
<arg line="./createArchetypes.sh"/>
</exec>
<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
<arg line="./createArchetypes.sh"/>
</exec>
</configuration>
</execution>
<!-- exec-maven-plugin executes script which invokes 'install' to install each

@@ -277,14 +278,14 @@
which does test generation of a project based on the archetype. -->
<execution>
<id>run-installArchetypes-script</id>
<phase>install</phase>
<goals>
<goal>run</goal>
</goals>
<phase>install</phase>
<configuration>
<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
<arg line="./installArchetypes.sh"/>
</exec>
<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
<arg line="./installArchetypes.sh"/>
</exec>
</configuration>
</execution>
</executions>
@@ -1,8 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0"
xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation=
"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one

@@ -24,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
@@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Successful running of this application requires access to an active instance
* of HBase. For install instructions for a standalone instance of HBase, please
* refer to https://hbase.apache.org/book.html#quickstart
* Successful running of this application requires access to an active instance of HBase. For
* install instructions for a standalone instance of HBase, please refer to
* https://hbase.apache.org/book.html#quickstart
*/
public final class HelloHBase {
protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
static final byte[] MY_FIRST_COLUMN_QUALIFIER
= Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER
= Bytes.toBytes("mySecondColumn");
static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
// Private constructor included here to avoid checkstyle warnings
@@ -61,20 +58,20 @@ public final class HelloHBase {
final boolean deleteAllAtEOJ = true;
/**
* ConnectionFactory#createConnection() automatically looks for
* hbase-site.xml (HBase configuration parameters) on the system's
* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
* HBase via ZooKeeper.
*/
try (Connection connection = ConnectionFactory.createConnection();
Admin admin = connection.getAdmin()) {
Admin admin = connection.getAdmin()) {
admin.getClusterMetrics(); // assure connection successfully established
System.out.println("\n*** Hello HBase! -- Connection has been "
+ "established via ZooKeeper!!\n");
System.out
.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
createNamespaceAndTable(admin);
System.out.println("Getting a Table object for [" + MY_TABLE_NAME
+ "] with which to perform CRUD operations in HBase.");
+ "] with which to perform CRUD operations in HBase.");
try (Table table = connection.getTable(MY_TABLE_NAME)) {
putRowToTable(table);

@@ -92,9 +89,8 @@ public final class HelloHBase {
}
/**
* Invokes Admin#createNamespace and Admin#createTable to create a namespace
* with a table that has one column-family.
*
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
* one column-family.
* @param admin Standard Admin object
* @throws IOException If IO problem encountered
*/
@@ -103,48 +99,38 @@ public final class HelloHBase {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
admin.createNamespace(NamespaceDescriptor
.create(MY_NAMESPACE_NAME).build());
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
+ "], with one Column Family ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
+ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME))
.build();
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build();
admin.createTable(desc);
}
}
/**
* Invokes Table#put to store a row (with two new columns created 'on the
* fly') into the table.
*
* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
* @param table Standard Table object (used for CRUD operations).
* @throws IOException If IO problem encountered
*/
static void putRowToTable(final Table table) throws IOException {
table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
MY_SECOND_COLUMN_QUALIFIER,
Bytes.toBytes("World!")));
table.put(new Put(MY_ROW_ID)
.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
+ "] was put into Table ["
+ table.getName().getNameAsString() + "] in HBase;\n"
+ " the row's two columns (created 'on the fly') are: ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
+ table.getName().getNameAsString() + "] in HBase;\n"
+ " the row's two columns (created 'on the fly') are: ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
}
/**
* Invokes Table#get and prints out the contents of the retrieved row.
*
* @param table Standard Table object
* @throws IOException If IO problem encountered
*/
@@ -152,38 +138,32 @@ public final class HelloHBase {
Result row = table.get(new Get(MY_ROW_ID));
System.out.println("Row [" + Bytes.toString(row.getRow())
+ "] was retrieved from Table ["
+ table.getName().getNameAsString()
+ "] in HBase, with the following content:");
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
+ table.getName().getNameAsString() + "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
: row.getNoVersionMap().entrySet()) {
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
.entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println(" Columns in Column Family [" + columnFamilyName
+ "]:");
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap
: colFamilyEntry.getValue().entrySet()) {
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
System.out.println(" Value of Column [" + columnFamilyName + ":"
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
+ Bytes.toString(columnNameAndValueMap.getValue()));
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
+ Bytes.toString(columnNameAndValueMap.getValue()));
}
}
}
/**
* Checks to see whether a namespace exists.
*
* @param admin Standard Admin object
* @param admin Standard Admin object
* @param namespaceName Name of namespace
* @return true If namespace exists
* @throws IOException If IO problem encountered
*/
static boolean namespaceExists(final Admin admin, final String namespaceName)
throws IOException {
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
try {
admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) {
@@ -194,28 +174,24 @@ public final class HelloHBase {
/**
* Invokes Table#delete to delete test data (i.e. the row)
*
* @param table Standard Table object
* @throws IOException If IO problem is encountered
*/
static void deleteRow(final Table table) throws IOException {
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
+ "] from Table ["
+ table.getName().getNameAsString() + "].");
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
+ table.getName().getNameAsString() + "].");
table.delete(new Delete(MY_ROW_ID));
}
/**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
* disable/delete Table and delete Namespace.
*
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* Table and delete Namespace.
* @param admin Standard Admin object
* @throws IOException If IO problem is encountered
*/
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Disabling/deleting Table ["
+ MY_TABLE_NAME.getNameAsString() + "].");
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME);
}
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -44,10 +44,9 @@ public class TestHelloHBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHelloHBase.class);
HBaseClassTestRule.forClass(TestHelloHBase.class);
private static final HBaseTestingUtil TEST_UTIL
= new HBaseTestingUtil();
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
@BeforeClass
public static void beforeClass() throws Exception {

@@ -67,13 +66,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
assertEquals("#namespaceExists failed: found nonexistent namespace.",
false, exists);
assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
assertEquals("#namespaceExists failed: did NOT find existing namespace.",
true, exists);
assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
admin.deleteNamespace(EXISTING_NAMESPACE);
}

@@ -82,14 +79,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
HelloHBase.createNamespaceAndTable(admin);
boolean namespaceExists
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.",
true, namespaceExists);
boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
assertEquals("#createNamespaceAndTable failed to create table.",
true, tableExists);
assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
admin.disableTable(HelloHBase.MY_TABLE_NAME);
admin.deleteTable(HelloHBase.MY_TABLE_NAME);

@@ -100,8 +94,7 @@ public class TestHelloHBase {
public void testPutRowToTable() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
HelloHBase.putRowToTable(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
@@ -115,13 +108,10 @@ public class TestHelloHBase {
public void testDeleteRow() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
table.put(new Put(HelloHBase.MY_ROW_ID).
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("xyz")));
table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
HelloHBase.deleteRow(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());
@@ -1,8 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0"
xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation=
"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one

@@ -24,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>

@@ -44,16 +41,16 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
<exclusions>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
@@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Successful running of this application requires access to an active instance
* of HBase. For install instructions for a standalone instance of HBase, please
* refer to https://hbase.apache.org/book.html#quickstart
* Successful running of this application requires access to an active instance of HBase. For
* install instructions for a standalone instance of HBase, please refer to
* https://hbase.apache.org/book.html#quickstart
*/
public final class HelloHBase {
protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
static final byte[] MY_FIRST_COLUMN_QUALIFIER
= Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER
= Bytes.toBytes("mySecondColumn");
static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
// Private constructor included here to avoid checkstyle warnings
@@ -60,20 +57,20 @@ public final class HelloHBase {
final boolean deleteAllAtEOJ = true;
/**
* ConnectionFactory#createConnection() automatically looks for
* hbase-site.xml (HBase configuration parameters) on the system's
* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
* HBase via ZooKeeper.
*/
try (Connection connection = ConnectionFactory.createConnection();
Admin admin = connection.getAdmin()) {
Admin admin = connection.getAdmin()) {
admin.getClusterMetrics(); // assure connection successfully established
System.out.println("\n*** Hello HBase! -- Connection has been "
+ "established via ZooKeeper!!\n");
System.out
.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
createNamespaceAndTable(admin);
System.out.println("Getting a Table object for [" + MY_TABLE_NAME
+ "] with which to perform CRUD operations in HBase.");
+ "] with which to perform CRUD operations in HBase.");
try (Table table = connection.getTable(MY_TABLE_NAME)) {
putRowToTable(table);

@@ -91,9 +88,8 @@ public final class HelloHBase {
}
/**
* Invokes Admin#createNamespace and Admin#createTable to create a namespace
* with a table that has one column-family.
*
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
* one column-family.
* @param admin Standard Admin object
* @throws IOException If IO problem encountered
*/
@@ -102,13 +98,11 @@ public final class HelloHBase {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
admin.createNamespace(NamespaceDescriptor
.create(MY_NAMESPACE_NAME).build());
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
+ "], with one Column Family ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
+ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build());

@@ -116,33 +110,26 @@ public final class HelloHBase {
}
/**
* Invokes Table#put to store a row (with two new columns created 'on the
* fly') into the table.
*
* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
* @param table Standard Table object (used for CRUD operations).
* @throws IOException If IO problem encountered
*/
static void putRowToTable(final Table table) throws IOException {
table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
MY_SECOND_COLUMN_QUALIFIER,
Bytes.toBytes("World!")));
table.put(new Put(MY_ROW_ID)
.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
+ "] was put into Table ["
+ table.getName().getNameAsString() + "] in HBase;\n"
+ " the row's two columns (created 'on the fly') are: ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
+ table.getName().getNameAsString() + "] in HBase;\n"
+ " the row's two columns (created 'on the fly') are: ["
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
}
/**
* Invokes Table#get and prints out the contents of the retrieved row.
*
* @param table Standard Table object
* @throws IOException If IO problem encountered
*/
@@ -150,38 +137,32 @@ public final class HelloHBase {
Result row = table.get(new Get(MY_ROW_ID));
System.out.println("Row [" + Bytes.toString(row.getRow())
+ "] was retrieved from Table ["
+ table.getName().getNameAsString()
+ "] in HBase, with the following content:");
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
+ table.getName().getNameAsString() + "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
: row.getNoVersionMap().entrySet()) {
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
.entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println(" Columns in Column Family [" + columnFamilyName
+ "]:");
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap
: colFamilyEntry.getValue().entrySet()) {
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
System.out.println(" Value of Column [" + columnFamilyName + ":"
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
+ Bytes.toString(columnNameAndValueMap.getValue()));
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
+ Bytes.toString(columnNameAndValueMap.getValue()));
}
}
}
/**
* Checks to see whether a namespace exists.
*
* @param admin Standard Admin object
* @param admin Standard Admin object
* @param namespaceName Name of namespace
* @return true If namespace exists
* @throws IOException If IO problem encountered
*/
static boolean namespaceExists(final Admin admin, final String namespaceName)
throws IOException {
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
try {
admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) {
@@ -192,28 +173,24 @@ public final class HelloHBase {
/**
* Invokes Table#delete to delete test data (i.e. the row)
*
* @param table Standard Table object
* @throws IOException If IO problem is encountered
*/
static void deleteRow(final Table table) throws IOException {
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
+ "] from Table ["
+ table.getName().getNameAsString() + "].");
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
+ table.getName().getNameAsString() + "].");
table.delete(new Delete(MY_ROW_ID));
}
/**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
* disable/delete Table and delete Namespace.
*
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* Table and delete Namespace.
* @param admin Standard Admin object
* @throws IOException If IO problem is encountered
*/
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Disabling/deleting Table ["
+ MY_TABLE_NAME.getNameAsString() + "].");
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME);
}
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@@ -44,10 +44,9 @@ public class TestHelloHBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHelloHBase.class);
HBaseClassTestRule.forClass(TestHelloHBase.class);
private static final HBaseTestingUtil TEST_UTIL
= new HBaseTestingUtil();
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
@BeforeClass
public static void beforeClass() throws Exception {

@@ -67,13 +66,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
assertEquals("#namespaceExists failed: found nonexistent namespace.",
false, exists);
assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
assertEquals("#namespaceExists failed: did NOT find existing namespace.",
true, exists);
assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
admin.deleteNamespace(EXISTING_NAMESPACE);
}

@@ -82,14 +79,11 @@ public class TestHelloHBase {
Admin admin = TEST_UTIL.getAdmin();
HelloHBase.createNamespaceAndTable(admin);
boolean namespaceExists
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.",
true, namespaceExists);
boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
assertEquals("#createNamespaceAndTable failed to create table.",
true, tableExists);
assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
admin.disableTable(HelloHBase.MY_TABLE_NAME);
admin.deleteTable(HelloHBase.MY_TABLE_NAME);

@@ -100,8 +94,7 @@ public class TestHelloHBase {
public void testPutRowToTable() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
HelloHBase.putRowToTable(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
@@ -115,13 +108,10 @@ public class TestHelloHBase {
public void testDeleteRow() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
Table table
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
table.put(new Put(HelloHBase.MY_ROW_ID).
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
Bytes.toBytes("xyz")));
table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
HelloHBase.deleteRow(table);
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());
@@ -1,6 +1,5 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one

@@ -22,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>

@@ -68,10 +67,10 @@
<artifactId>spotbugs-maven-plugin</artifactId>
<executions>
<execution>
<inherited>false</inherited>
<goals>
<goal>spotbugs</goal>
</goals>
<inherited>false</inherited>
<configuration>
<excludeFilterFile>${project.basedir}/../dev-support/spotbugs-exclude.xml</excludeFilterFile>
</configuration>
@ -1,4 +1,4 @@
|
|||
<?xml version="1.0"?>
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<!--
|
||||
/**
|
||||
|
@ -21,160 +21,18 @@
|
|||
-->
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<artifactId>hbase-build-configuration</artifactId>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-build-configuration</artifactId>
|
||||
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
||||
<relativePath>../hbase-build-configuration</relativePath>
|
||||
</parent>
|
||||
<artifactId>hbase-assembly</artifactId>
|
||||
<name>Apache HBase - Assembly</name>
|
||||
<description>
|
||||
Module that does project assembly and that is all that it does.
|
||||
</description>
|
||||
<packaging>pom</packaging>
|
||||
<name>Apache HBase - Assembly</name>
|
||||
<description>Module that does project assembly and that is all that it does.</description>
|
||||
<properties>
|
||||
<license.bundles.dependencies>true</license.bundles.dependencies>
|
||||
</properties>
|
||||
<build>
|
||||
<plugins>
|
||||
<!-- licensing info from our dependencies -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-remote-resources-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>aggregate-licenses</id>
|
||||
<goals>
|
||||
<goal>process</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<properties>
|
||||
<copyright-end-year>${build.year}</copyright-end-year>
|
||||
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
|
||||
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
|
||||
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
|
||||
<bundled-vega>${license.bundles.vega}</bundled-vega>
|
||||
<bundled-logo>${license.bundles.logo}</bundled-logo>
|
||||
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
|
||||
</properties>
|
||||
<resourceBundles>
|
||||
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
|
||||
</resourceBundles>
|
||||
<supplementalModelArtifacts>
|
||||
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
|
||||
</supplementalModelArtifacts>
|
||||
<supplementalModels>
|
||||
<supplementalModel>supplemental-models.xml</supplementalModel>
|
||||
</supplementalModels>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<configuration>
|
||||
<!--Else will use hbase-assembly as final name.-->
|
||||
<finalName>hbase-${project.version}</finalName>
|
||||
<skipAssembly>false</skipAssembly>
|
||||
<appendAssemblyId>true</appendAssemblyId>
|
||||
<tarLongFileMode>posix</tarLongFileMode>
|
||||
<descriptors>
|
||||
<descriptor>${assembly.file}</descriptor>
|
||||
<descriptor>src/main/assembly/client.xml</descriptor>
|
||||
</descriptors>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
|
||||
<id>create-hbase-generated-classpath</id>
|
||||
<phase>test</phase>
|
||||
<goals>
|
||||
<goal>build-classpath</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
|
||||
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
|
||||
</configuration>
|
||||
</execution>
|
||||
|
||||
<execution>
|
||||
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
|
||||
<id>create-hbase-generated-classpath-jline</id>
|
||||
<phase>test</phase>
|
||||
<goals>
|
||||
<goal>build-classpath</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
|
||||
<includeArtifactIds>jline</includeArtifactIds>
|
||||
</configuration>
|
||||
</execution>
|
||||
|
||||
<execution>
|
||||
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
|
||||
<id>create-hbase-generated-classpath-jruby</id>
|
||||
<phase>test</phase>
|
||||
<goals>
|
||||
<goal>build-classpath</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
|
||||
<includeArtifactIds>jruby-complete</includeArtifactIds>
|
||||
</configuration>
|
||||
</execution>
|
||||
|
||||
<!--
|
||||
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
|
||||
If MASSEMBLY-382 is fixed we could do this in the assembly
|
||||
Currently relies on env, bash, find, and cat.
|
||||
-->
|
||||
<execution>
|
||||
<!-- put all of the NOTICE files out of our dependencies -->
|
||||
<id>unpack-dependency-notices</id>
|
||||
<phase>prepare-package</phase>
|
||||
<goals>
|
||||
<goal>unpack-dependencies</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<excludeTypes>pom</excludeTypes>
|
||||
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
|
||||
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
<version>${exec.maven.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>concat-NOTICE-files</id>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<executable>env</executable>
|
||||
<arguments>
|
||||
<argument>bash</argument>
|
||||
<argument>-c</argument>
|
||||
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
|
||||
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`
|
||||
</argument>
|
||||
</arguments>
|
||||
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
|
||||
<workingDirectory>${project.build.directory}</workingDirectory>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<!-- /end building aggregation of NOTICE files -->
|
||||
</plugins>
|
||||
</build>
|
||||
<dependencies>
|
||||
<!-- client artifacts for downstream use -->
|
||||
<dependency>
|
||||
|
@ -189,7 +47,7 @@
|
|||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-shaded-mapreduce</artifactId>
|
||||
</dependency>
|
||||
<!-- Intra-project dependencies -->
|
||||
<!-- Intra-project dependencies -->
|
||||
<dependency>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-it</artifactId>
|
||||
|
@ -254,25 +112,25 @@
|
|||
<artifactId>hbase-external-blockcache</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-testing-util</artifactId>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-testing-util</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-metrics-api</artifactId>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-metrics-api</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-metrics</artifactId>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-metrics</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-protocol-shaded</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-resource-bundle</artifactId>
|
||||
<optional>true</optional>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-resource-bundle</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
|
@ -390,4 +248,143 @@
|
|||
<scope>compile</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
<!-- licensing info from our dependencies -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-remote-resources-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>aggregate-licenses</id>
|
||||
<goals>
|
||||
<goal>process</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<properties>
|
||||
<copyright-end-year>${build.year}</copyright-end-year>
|
||||
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
|
||||
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
|
||||
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
|
||||
<bundled-vega>${license.bundles.vega}</bundled-vega>
|
||||
<bundled-logo>${license.bundles.logo}</bundled-logo>
|
||||
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
|
||||
</properties>
|
||||
<resourceBundles>
|
||||
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
|
||||
</resourceBundles>
|
||||
<supplementalModelArtifacts>
|
||||
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
|
||||
</supplementalModelArtifacts>
|
||||
<supplementalModels>
|
||||
<supplementalModel>supplemental-models.xml</supplementalModel>
|
||||
</supplementalModels>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<configuration>
|
||||
<!--Else will use hbase-assembly as final name.-->
|
||||
<finalName>hbase-${project.version}</finalName>
|
||||
<skipAssembly>false</skipAssembly>
|
||||
<appendAssemblyId>true</appendAssemblyId>
|
||||
<tarLongFileMode>posix</tarLongFileMode>
|
||||
<descriptors>
|
||||
<descriptor>${assembly.file}</descriptor>
|
||||
<descriptor>src/main/assembly/client.xml</descriptor>
|
||||
</descriptors>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
|
||||
<id>create-hbase-generated-classpath</id>
|
||||
<goals>
|
||||
<goal>build-classpath</goal>
|
||||
</goals>
|
||||
<phase>test</phase>
|
||||
<configuration>
|
||||
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
|
||||
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
|
||||
</configuration>
|
||||
</execution>
|
||||
|
||||
<execution>
|
||||
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
|
||||
<id>create-hbase-generated-classpath-jline</id>
|
||||
<goals>
|
||||
<goal>build-classpath</goal>
|
||||
</goals>
|
||||
<phase>test</phase>
|
||||
<configuration>
|
||||
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
|
||||
<includeArtifactIds>jline</includeArtifactIds>
|
||||
</configuration>
|
||||
</execution>
|
||||
|
||||
<execution>
|
||||
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
|
||||
<id>create-hbase-generated-classpath-jruby</id>
|
||||
<goals>
|
||||
<goal>build-classpath</goal>
|
||||
</goals>
|
||||
<phase>test</phase>
|
||||
<configuration>
|
||||
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
|
||||
<includeArtifactIds>jruby-complete</includeArtifactIds>
|
||||
</configuration>
|
||||
</execution>
|
||||
|
||||
<!--
|
||||
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
|
||||
If MASSEMBLY-382 is fixed we could do this in the assembly
|
||||
Currently relies on env, bash, find, and cat.
|
||||
-->
|
||||
<execution>
|
||||
<!-- put all of the NOTICE files out of our dependencies -->
|
||||
<id>unpack-dependency-notices</id>
|
||||
<goals>
|
||||
<goal>unpack-dependencies</goal>
|
||||
</goals>
|
||||
<phase>prepare-package</phase>
|
||||
<configuration>
|
||||
<excludeTypes>pom</excludeTypes>
|
||||
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
|
||||
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
<version>${exec.maven.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>concat-NOTICE-files</id>
|
||||
<goals>
|
||||
<goal>exec</goal>
|
||||
</goals>
|
||||
<phase>package</phase>
|
||||
<configuration>
|
||||
<executable>env</executable>
|
||||
<arguments>
|
||||
<argument>bash</argument>
|
||||
<argument>-c</argument>
|
||||
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
|
||||
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`</argument>
|
||||
</arguments>
|
||||
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
|
||||
<workingDirectory>${project.build.directory}</workingDirectory>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<!-- /end building aggregation of NOTICE files -->
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
|
|
|
@ -1,6 +1,5 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one

@ -22,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-build-configuration</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>

@ -31,33 +30,6 @@
<artifactId>hbase-asyncfs</artifactId>
<name>Apache HBase - Asynchronous FileSystem</name>
<description>HBase Asynchronous FileSystem Implementation for WAL</description>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
</plugins>
</build>

<dependencies>
<dependency>

@ -169,13 +141,42 @@
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
</plugins>
</build>

<profiles>
<!-- Profiles for building against different hadoop versions -->
<profile>
<id>hadoop-3.0</id>
<activation>
<property><name>!hadoop.profile</name></property>
<property>
<name>!hadoop.profile</name>
</property>
</activation>
<dependencies>
<dependency>

@ -224,8 +225,7 @@
<artifactId>lifecycle-mapping</artifactId>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
</pluginExecutions>
<pluginExecutions/>
</lifecycleMappingMetadata>
</configuration>
</plugin>

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@ -21,10 +21,9 @@ import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.yetus.audience.InterfaceAudience;

/**
* Interface for asynchronous filesystem output stream.

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@ -47,9 +47,9 @@ public final class AsyncFSOutputHelper {
* implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
*/
public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite,
boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup,
Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
throws IOException, CommonFSUtils.StreamLacksCapabilityException {
boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup,
Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
throws IOException, CommonFSUtils.StreamLacksCapabilityException {
if (fs instanceof DistributedFileSystem) {
return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f,
overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor);

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@ -180,7 +180,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {

// State for connections to DN
private enum State {
STREAMING, CLOSING, BROKEN, CLOSED
STREAMING,
CLOSING,
BROKEN,
CLOSED
}

private volatile State state;

@ -196,7 +199,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
if (c.unfinishedReplicas.remove(channel.id())) {
long current = EnvironmentEdgeManager.currentTime();
streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen,
current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size());
current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size());
c.lastAckTimestamp = current;
if (c.unfinishedReplicas.isEmpty()) {
// we need to remove first before complete the future. It is possible that after we

@ -284,13 +287,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
Status reply = getStatus(ack);
if (reply != Status.SUCCESS) {
failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " +
block + " from datanode " + ctx.channel().remoteAddress()));
failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block
+ " from datanode " + ctx.channel().remoteAddress()));
return;
}
if (PipelineAck.isRestartOOBStatus(reply)) {
failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " +
block + " from datanode " + ctx.channel().remoteAddress()));
failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block "
+ block + " from datanode " + ctx.channel().remoteAddress()));
return;
}
if (ack.getSeqno() == HEART_BEAT_SEQNO) {

@ -345,10 +348,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
}
}

FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs,
DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
LocatedBlock locatedBlock, Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap,
DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client,
ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock,
Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap, DataChecksum summer,
ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
this.conf = conf;
this.dfs = dfs;
this.client = client;

@ -403,7 +406,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
}

private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
long nextPacketOffsetInBlock, boolean syncBlock) {
long nextPacketOffsetInBlock, boolean syncBlock) {
int dataLen = dataBuf.readableBytes();
int chunkLen = summer.getBytesPerChecksum();
int trailingPartialChunkLen = dataLen % chunkLen;

@ -413,13 +416,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
checksumBuf.writerIndex(checksumLen);
PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
nextPacketSeqno, false, dataLen, syncBlock);
nextPacketSeqno, false, dataLen, syncBlock);
int headerLen = header.getSerializedSize();
ByteBuf headerBuf = alloc.buffer(headerLen);
header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
headerBuf.writerIndex(headerLen);
Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen,
datanodeInfoMap.keySet(), dataLen);
Callback c =
new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen);
waitingAckQueue.addLast(c);
// recheck again after we pushed the callback to queue
if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {

@ -429,7 +432,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
return;
}
// TODO: we should perhaps measure time taken per DN here;
// we could collect statistics per DN, and/or exclude bad nodes in createOutput.
// we could collect statistics per DN, and/or exclude bad nodes in createOutput.
datanodeInfoMap.keySet().forEach(ch -> {
ch.write(headerBuf.retainedDuplicate());
ch.write(checksumBuf.retainedDuplicate());

@ -514,7 +517,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
}
trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum();
ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen))
.ensureWritable(trailingPartialChunkLength);
.ensureWritable(trailingPartialChunkLength);
if (trailingPartialChunkLength != 0) {
buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf,
trailingPartialChunkLength);

@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -116,7 +116,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
|
|||
@InterfaceAudience.Private
|
||||
public final class FanOutOneBlockAsyncDFSOutputHelper {
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class);
|
||||
LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class);
|
||||
|
||||
private FanOutOneBlockAsyncDFSOutputHelper() {
|
||||
}
|
||||
|
@ -145,9 +145,8 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
// helper class for creating files.
|
||||
private interface FileCreator {
|
||||
default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked,
|
||||
String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize, CryptoProtocolVersion[] supportedVersions)
|
||||
throws Exception {
|
||||
String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication,
|
||||
long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception {
|
||||
try {
|
||||
return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent,
|
||||
replication, blockSize, supportedVersions);
|
||||
|
@ -161,15 +160,15 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName,
|
||||
EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize,
|
||||
CryptoProtocolVersion[] supportedVersions) throws Exception;
|
||||
EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize,
|
||||
CryptoProtocolVersion[] supportedVersions) throws Exception;
|
||||
}
|
||||
|
||||
private static final FileCreator FILE_CREATOR;
|
||||
|
||||
private static LeaseManager createLeaseManager() throws NoSuchMethodException {
|
||||
Method beginFileLeaseMethod =
|
||||
DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class);
|
||||
DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class);
|
||||
beginFileLeaseMethod.setAccessible(true);
|
||||
Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class);
|
||||
endFileLeaseMethod.setAccessible(true);
|
||||
|
@ -197,13 +196,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
|
||||
private static FileCreator createFileCreator3_3() throws NoSuchMethodException {
|
||||
Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class,
|
||||
String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
|
||||
CryptoProtocolVersion[].class, String.class, String.class);
|
||||
String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
|
||||
CryptoProtocolVersion[].class, String.class, String.class);
|
||||
|
||||
return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
|
||||
supportedVersions) -> {
|
||||
supportedVersions) -> {
|
||||
return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
|
||||
createParent, replication, blockSize, supportedVersions, null, null);
|
||||
createParent, replication, blockSize, supportedVersions, null, null);
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -213,7 +212,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
CryptoProtocolVersion[].class, String.class);
|
||||
|
||||
return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
|
||||
supportedVersions) -> {
|
||||
supportedVersions) -> {
|
||||
return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
|
||||
createParent, replication, blockSize, supportedVersions, null);
|
||||
};
|
||||
|
@ -249,9 +248,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
LEASE_MANAGER = createLeaseManager();
|
||||
FILE_CREATOR = createFileCreator();
|
||||
} catch (Exception e) {
|
||||
String msg = "Couldn't properly initialize access to HDFS internals. Please " +
|
||||
"update your WAL Provider to not make use of the 'asyncfs' provider. See " +
|
||||
"HBASE-16110 for more information.";
|
||||
String msg = "Couldn't properly initialize access to HDFS internals. Please "
|
||||
+ "update your WAL Provider to not make use of the 'asyncfs' provider. See "
|
||||
+ "HBASE-16110 for more information.";
|
||||
LOG.error(msg, e);
|
||||
throw new Error(msg, e);
|
||||
}
|
||||
|
@ -282,7 +281,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo,
|
||||
Promise<Channel> promise, int timeoutMs) {
|
||||
Promise<Channel> promise, int timeoutMs) {
|
||||
channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
|
||||
new ProtobufVarint32FrameDecoder(),
|
||||
new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
|
||||
|
@ -290,7 +289,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
|
||||
@Override
|
||||
protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
|
||||
throws Exception {
|
||||
throws Exception {
|
||||
Status pipelineStatus = resp.getStatus();
|
||||
if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
|
||||
throw new IOException("datanode " + dnInfo + " is restarting");
|
||||
|
@ -298,11 +297,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
|
||||
if (resp.getStatus() != Status.SUCCESS) {
|
||||
if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
|
||||
throw new InvalidBlockTokenException("Got access token error" + ", status message " +
|
||||
resp.getMessage() + ", " + logInfo);
|
||||
throw new InvalidBlockTokenException("Got access token error" + ", status message "
|
||||
+ resp.getMessage() + ", " + logInfo);
|
||||
} else {
|
||||
throw new IOException("Got error" + ", status=" + resp.getStatus().name() +
|
||||
", status message " + resp.getMessage() + ", " + logInfo);
|
||||
throw new IOException("Got error" + ", status=" + resp.getStatus().name()
|
||||
+ ", status message " + resp.getMessage() + ", " + logInfo);
|
||||
}
|
||||
}
|
||||
// success
|
||||
|
@ -329,7 +328,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
|
||||
if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) {
|
||||
promise
|
||||
.tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
|
||||
.tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
|
||||
} else {
|
||||
super.userEventTriggered(ctx, evt);
|
||||
}
|
||||
|
@ -343,7 +342,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
private static void requestWriteBlock(Channel channel, StorageType storageType,
|
||||
OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
|
||||
OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
|
||||
OpWriteBlockProto proto =
|
||||
writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build();
|
||||
int protoLen = proto.getSerializedSize();
|
||||
|
@ -356,9 +355,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
|
||||
StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
|
||||
DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
|
||||
throws IOException {
|
||||
StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
|
||||
DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
|
||||
throws IOException {
|
||||
Promise<Void> saslPromise = channel.eventLoop().newPromise();
|
||||
trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
|
||||
saslPromise.addListener(new FutureListener<Void>() {
|
||||
|
@ -377,13 +376,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
|
||||
String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
|
||||
BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
|
||||
Class<? extends Channel> channelClass) {
|
||||
String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
|
||||
BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
|
||||
Class<? extends Channel> channelClass) {
|
||||
StorageType[] storageTypes = locatedBlock.getStorageTypes();
|
||||
DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
|
||||
boolean connectToDnViaHostname =
|
||||
conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
|
||||
conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
|
||||
int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
|
||||
ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
|
||||
blockCopy.setNumBytes(locatedBlock.getBlockSize());
|
||||
|
@ -392,11 +391,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
.setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
|
||||
.setClientName(clientName).build();
|
||||
ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
|
||||
OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
|
||||
.setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
|
||||
.setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
|
||||
.setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
|
||||
.setRequestedChecksum(checksumProto)
|
||||
OpWriteBlockProto.Builder writeBlockProtoBuilder =
|
||||
OpWriteBlockProto.newBuilder().setHeader(header)
|
||||
.setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
|
||||
.setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
|
||||
.setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
|
||||
.setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
|
||||
List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
|
||||
for (int i = 0; i < datanodeInfos.length; i++) {
|
||||
|
@ -406,26 +405,26 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
futureList.add(promise);
|
||||
String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
|
||||
new Bootstrap().group(eventLoopGroup).channel(channelClass)
|
||||
.option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {
|
||||
.option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {
|
||||
|
||||
@Override
|
||||
protected void initChannel(Channel ch) throws Exception {
|
||||
// we need to get the remote address of the channel so we can only move on after
|
||||
// channel connected. Leave an empty implementation here because netty does not allow
|
||||
// a null handler.
|
||||
}
|
||||
}).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {
|
||||
@Override
|
||||
protected void initChannel(Channel ch) throws Exception {
|
||||
// we need to get the remote address of the channel so we can only move on after
|
||||
// channel connected. Leave an empty implementation here because netty does not allow
|
||||
// a null handler.
|
||||
}
|
||||
}).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {
|
||||
|
||||
@Override
|
||||
public void operationComplete(ChannelFuture future) throws Exception {
|
||||
if (future.isSuccess()) {
|
||||
initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
|
||||
timeoutMs, client, locatedBlock.getBlockToken(), promise);
|
||||
} else {
|
||||
promise.tryFailure(future.cause());
|
||||
}
|
||||
@Override
|
||||
public void operationComplete(ChannelFuture future) throws Exception {
|
||||
if (future.isSuccess()) {
|
||||
initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
|
||||
timeoutMs, client, locatedBlock.getBlockToken(), promise);
|
||||
} else {
|
||||
promise.tryFailure(future.cause());
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
return futureList;
|
||||
}
|
||||
|
@ -453,21 +452,21 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
|
||||
boolean overwrite, boolean createParent, short replication, long blockSize,
|
||||
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
|
||||
StreamSlowMonitor monitor) throws IOException {
|
||||
boolean overwrite, boolean createParent, short replication, long blockSize,
|
||||
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
|
||||
throws IOException {
|
||||
Configuration conf = dfs.getConf();
|
||||
DFSClient client = dfs.getClient();
|
||||
String clientName = client.getClientName();
|
||||
ClientProtocol namenode = client.getNamenode();
|
||||
int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES,
|
||||
DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
|
||||
int createMaxRetries =
|
||||
conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
|
||||
ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
|
||||
Set<DatanodeInfo> toExcludeNodes =
|
||||
new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
|
||||
for (int retry = 0;; retry++) {
|
||||
LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src,
|
||||
toExcludeNodes, retry);
|
||||
toExcludeNodes, retry);
|
||||
HdfsFileStatus stat;
|
||||
try {
|
||||
stat = FILE_CREATOR.create(namenode, src,
|
||||
|
@ -556,14 +555,14 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
* inside an {@link EventLoop}.
|
||||
*/
|
||||
public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f,
|
||||
boolean overwrite, boolean createParent, short replication, long blockSize,
|
||||
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
|
||||
final StreamSlowMonitor monitor) throws IOException {
|
||||
boolean overwrite, boolean createParent, short replication, long blockSize,
|
||||
EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
|
||||
final StreamSlowMonitor monitor) throws IOException {
|
||||
return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() {
|
||||
|
||||
@Override
|
||||
public FanOutOneBlockAsyncDFSOutput doCall(Path p)
|
||||
throws IOException, UnresolvedLinkException {
|
||||
throws IOException, UnresolvedLinkException {
|
||||
return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication,
|
||||
blockSize, eventLoopGroup, channelClass, monitor);
|
||||
}
|
||||
|
@ -583,7 +582,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
|
|||
}
|
||||
|
||||
static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName,
|
||||
ExtendedBlock block, long fileId) {
|
||||
ExtendedBlock block, long fileId) {
|
||||
for (int retry = 0;; retry++) {
|
||||
try {
|
||||
if (namenode.complete(src, clientName, block, fileId)) {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -104,7 +104,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
|
|||
@InterfaceAudience.Private
|
||||
public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class);
|
||||
LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class);
|
||||
|
||||
private FanOutOneBlockAsyncDFSOutputSaslHelper() {
|
||||
}
|
||||
|
@ -129,21 +129,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
private interface TransparentCryptoHelper {
|
||||
|
||||
Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client)
|
||||
throws IOException;
|
||||
throws IOException;
|
||||
}
|
||||
|
||||
private static final TransparentCryptoHelper TRANSPARENT_CRYPTO_HELPER;
|
||||
|
||||
private static SaslAdaptor createSaslAdaptor()
|
||||
throws NoSuchFieldException, NoSuchMethodException {
|
||||
throws NoSuchFieldException, NoSuchMethodException {
|
||||
Field saslPropsResolverField =
|
||||
SaslDataTransferClient.class.getDeclaredField("saslPropsResolver");
|
||||
SaslDataTransferClient.class.getDeclaredField("saslPropsResolver");
|
||||
saslPropsResolverField.setAccessible(true);
|
||||
Field trustedChannelResolverField =
|
||||
SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver");
|
||||
SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver");
|
||||
trustedChannelResolverField.setAccessible(true);
|
||||
Field fallbackToSimpleAuthField =
|
||||
SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth");
|
||||
SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth");
|
||||
fallbackToSimpleAuthField.setAccessible(true);
|
||||
return new SaslAdaptor() {
|
||||
|
||||
|
@ -177,7 +177,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
|
||||
private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396()
|
||||
throws NoSuchMethodException {
|
||||
throws NoSuchMethodException {
|
||||
Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class
|
||||
.getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
|
||||
decryptEncryptedDataEncryptionKeyMethod.setAccessible(true);
|
||||
|
@ -185,7 +185,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
|
||||
@Override
|
||||
public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
|
||||
DFSClient client) throws IOException {
|
||||
DFSClient client) throws IOException {
|
||||
try {
|
||||
KeyVersion decryptedKey =
|
||||
(KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo);
|
||||
|
@ -206,7 +206,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
|
||||
private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS12396()
|
||||
throws ClassNotFoundException, NoSuchMethodException {
|
||||
throws ClassNotFoundException, NoSuchMethodException {
|
||||
Class<?> hdfsKMSUtilCls = Class.forName("org.apache.hadoop.hdfs.HdfsKMSUtil");
|
||||
Method decryptEncryptedDataEncryptionKeyMethod = hdfsKMSUtilCls.getDeclaredMethod(
|
||||
"decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class, KeyProvider.class);
|
||||
|
@ -215,7 +215,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
|
||||
@Override
|
||||
public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
|
||||
DFSClient client) throws IOException {
|
||||
DFSClient client) throws IOException {
|
||||
try {
|
||||
KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod
|
||||
.invoke(null, feInfo, client.getKeyProvider());
|
||||
|
@ -236,12 +236,12 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
|
||||
private static TransparentCryptoHelper createTransparentCryptoHelper()
|
||||
throws NoSuchMethodException, ClassNotFoundException {
|
||||
throws NoSuchMethodException, ClassNotFoundException {
|
||||
try {
|
||||
return createTransparentCryptoHelperWithoutHDFS12396();
|
||||
} catch (NoSuchMethodException e) {
|
||||
LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," +
|
||||
" should be hadoop version with HDFS-12396", e);
|
||||
LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient,"
|
||||
+ " should be hadoop version with HDFS-12396", e);
|
||||
}
|
||||
return createTransparentCryptoHelperWithHDFS12396();
|
||||
}
|
||||
|
@ -252,8 +252,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
TRANSPARENT_CRYPTO_HELPER = createTransparentCryptoHelper();
|
||||
} catch (Exception e) {
|
||||
String msg = "Couldn't properly initialize access to HDFS internals. Please "
|
||||
+ "update your WAL Provider to not make use of the 'asyncfs' provider. See "
|
||||
+ "HBASE-16110 for more information.";
|
||||
+ "update your WAL Provider to not make use of the 'asyncfs' provider. See "
|
||||
+ "HBASE-16110 for more information.";
|
||||
LOG.error(msg, e);
|
||||
throw new Error(msg, e);
|
||||
}
|
||||
|
@ -324,8 +324,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
private int step = 0;
|
||||
|
||||
public SaslNegotiateHandler(Configuration conf, String username, char[] password,
|
||||
Map<String, String> saslProps, int timeoutMs, Promise<Void> promise,
|
||||
DFSClient dfsClient) throws SaslException {
|
||||
Map<String, String> saslProps, int timeoutMs, Promise<Void> promise, DFSClient dfsClient)
|
||||
throws SaslException {
|
||||
this.conf = conf;
|
||||
this.saslProps = saslProps;
|
||||
this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL,
|
||||
|
@ -355,8 +355,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
|
||||
/**
|
||||
* The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty.
|
||||
* After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
|
||||
* The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After
|
||||
* Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
|
||||
* Use Reflection to check which ones to use.
|
||||
*/
|
||||
private static class BuilderPayloadSetter {
|
||||
|
@ -366,13 +366,11 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
/**
|
||||
* Create a ByteString from byte array without copying (wrap), and then set it as the payload
|
||||
* for the builder.
|
||||
*
|
||||
* @param builder builder for HDFS DataTransferEncryptorMessage.
|
||||
* @param payload byte array of payload.
|
||||
* @throws IOException
|
||||
* @param payload byte array of payload. n
|
||||
*/
|
||||
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload)
|
||||
throws IOException {
|
||||
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
|
||||
byte[] payload) throws IOException {
|
||||
Object byteStringObject;
|
||||
try {
|
||||
// byteStringObject = new LiteralByteString(payload);
|
||||
|
@ -396,18 +394,18 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
try {
|
||||
// See if it can load the relocated ByteString, which comes from hadoop-thirdparty.
|
||||
byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
|
||||
LOG.debug("Found relocated ByteString class from hadoop-thirdparty." +
|
||||
" Assuming this is Hadoop 3.3.0+.");
|
||||
LOG.debug("Found relocated ByteString class from hadoop-thirdparty."
|
||||
+ " Assuming this is Hadoop 3.3.0+.");
|
||||
} catch (ClassNotFoundException e) {
|
||||
LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." +
|
||||
" Assuming this is below Hadoop 3.3.0", e);
|
||||
LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty."
|
||||
+ " Assuming this is below Hadoop 3.3.0", e);
|
||||
}
|
||||
|
||||
// LiteralByteString is a package private class in protobuf. Make it accessible.
|
||||
Class<?> literalByteStringClass;
|
||||
try {
|
||||
literalByteStringClass = Class.forName(
|
||||
"org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
|
||||
literalByteStringClass =
|
||||
Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
|
||||
LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found.");
|
||||
} catch (ClassNotFoundException e) {
|
||||
try {
|
||||
|
@ -435,9 +433,9 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
|
||||
private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload,
|
||||
List<CipherOption> options) throws IOException {
|
||||
List<CipherOption> options) throws IOException {
|
||||
DataTransferEncryptorMessageProto.Builder builder =
|
||||
DataTransferEncryptorMessageProto.newBuilder();
|
||||
DataTransferEncryptorMessageProto.newBuilder();
|
||||
builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
|
||||
if (payload != null) {
|
||||
BuilderPayloadSetter.wrapAndSetPayload(builder, payload);
|
||||
|
@ -486,7 +484,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
|
||||
private boolean requestedQopContainsPrivacy() {
|
||||
Set<String> requestedQop =
|
||||
ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
|
||||
ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
|
||||
return requestedQop.contains("auth-conf");
|
||||
}
|
||||
|
||||
|
@ -495,15 +493,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
throw new IOException("Failed to complete SASL handshake");
|
||||
}
|
||||
Set<String> requestedQop =
|
||||
ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
|
||||
ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
|
||||
String negotiatedQop = getNegotiatedQop();
|
||||
LOG.debug(
|
||||
"Verifying QOP, requested QOP = " + requestedQop + ", negotiated QOP = " + negotiatedQop);
|
||||
if (!requestedQop.contains(negotiatedQop)) {
|
||||
throw new IOException(String.format("SASL handshake completed, but "
|
||||
+ "channel does not have acceptable quality of protection, "
|
||||
+ "requested = %s, negotiated = %s",
|
||||
requestedQop, negotiatedQop));
|
||||
+ "channel does not have acceptable quality of protection, "
|
||||
+ "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -522,13 +519,13 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
outKey = saslClient.unwrap(outKey, 0, outKey.length);
|
||||
}
|
||||
return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey,
|
||||
option.getOutIv());
|
||||
option.getOutIv());
|
||||
}
|
||||
|
||||
private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto,
|
||||
boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException {
|
||||
boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException {
|
||||
List<CipherOption> cipherOptions =
|
||||
PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList());
|
||||
PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList());
|
||||
if (cipherOptions == null || cipherOptions.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
@ -558,7 +555,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
assert response == null;
|
||||
checkSaslComplete();
|
||||
CipherOption cipherOption =
|
||||
getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient);
|
||||
getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient);
|
||||
ChannelPipeline p = ctx.pipeline();
|
||||
while (p.first() != null) {
|
||||
p.removeFirst();
|
||||
|
@ -639,7 +636,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
|
||||
@Override
|
||||
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
|
||||
throws Exception {
|
||||
throws Exception {
|
||||
if (msg instanceof ByteBuf) {
|
||||
ByteBuf buf = (ByteBuf) msg;
|
||||
cBuf.addComponent(buf);
|
||||
|
@ -676,7 +673,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
private final Decryptor decryptor;
|
||||
|
||||
public DecryptHandler(CryptoCodec codec, byte[] key, byte[] iv)
|
||||
throws GeneralSecurityException, IOException {
|
||||
throws GeneralSecurityException, IOException {
|
||||
this.decryptor = codec.createDecryptor();
|
||||
this.decryptor.init(key, Arrays.copyOf(iv, iv.length));
|
||||
}
|
||||
|
@ -709,14 +706,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
private final Encryptor encryptor;
|
||||
|
||||
public EncryptHandler(CryptoCodec codec, byte[] key, byte[] iv)
|
||||
throws GeneralSecurityException, IOException {
|
||||
throws GeneralSecurityException, IOException {
|
||||
this.encryptor = codec.createEncryptor();
|
||||
this.encryptor.init(key, Arrays.copyOf(iv, iv.length));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect)
|
||||
throws Exception {
|
||||
throws Exception {
|
||||
if (preferDirect) {
|
||||
return ctx.alloc().directBuffer(msg.readableBytes());
|
||||
} else {
|
||||
|
@ -747,7 +744,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
|
||||
private static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) {
|
||||
return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER
|
||||
+ Base64.getEncoder().encodeToString(encryptionKey.nonce);
|
||||
+ Base64.getEncoder().encodeToString(encryptionKey.nonce);
|
||||
}
|
||||
|
||||
private static char[] encryptionKeyToPassword(byte[] encryptionKey) {
|
||||
|
@ -771,26 +768,26 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
|
||||
private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs,
|
||||
String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise,
|
||||
DFSClient dfsClient) {
|
||||
String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise,
|
||||
DFSClient dfsClient) {
|
||||
try {
|
||||
channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
|
||||
new ProtobufVarint32FrameDecoder(),
|
||||
new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()),
|
||||
new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise,
|
||||
dfsClient));
|
||||
dfsClient));
|
||||
} catch (SaslException e) {
|
||||
saslPromise.tryFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
|
||||
int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
|
||||
Promise<Void> saslPromise) throws IOException {
|
||||
int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
|
||||
Promise<Void> saslPromise) throws IOException {
|
||||
SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
|
||||
SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
|
||||
TrustedChannelResolver trustedChannelResolver =
|
||||
SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
|
||||
SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
|
||||
AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
|
||||
InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
|
||||
if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
|
||||
|
@ -805,24 +802,23 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
|
||||
encryptionKeyToPassword(encryptionKey.encryptionKey),
|
||||
createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise,
|
||||
client);
|
||||
createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client);
|
||||
} else if (!UserGroupInformation.isSecurityEnabled()) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
|
||||
+ ", datanodeId = " + dnInfo);
|
||||
+ ", datanodeId = " + dnInfo);
|
||||
}
|
||||
saslPromise.trySuccess(null);
|
||||
} else if (dnInfo.getXferPort() < 1024) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("SASL client skipping handshake in secured configuration with "
|
||||
+ "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||
+ "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||
}
|
||||
saslPromise.trySuccess(null);
|
||||
} else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("SASL client skipping handshake in secured configuration with "
|
||||
+ "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||
+ "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||
}
|
||||
saslPromise.trySuccess(null);
|
||||
} else if (saslPropsResolver != null) {
|
||||
|
@ -832,21 +828,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
|
|||
}
|
||||
doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
|
||||
buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise,
|
||||
client);
|
||||
client);
|
||||
} else {
|
||||
// It's a secured cluster using non-privileged ports, but no SASL. The only way this can
|
||||
// happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
|
||||
// edge case.
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
|
||||
+ "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||
+ "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||
}
|
||||
saslPromise.trySuccess(null);
|
||||
}
|
||||
}
|
||||
|
||||
static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
|
||||
if (feInfo == null) {
|
||||
return null;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -17,33 +17,29 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.io.asyncfs;
|
||||
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.List;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
|
||||
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil;
|
||||
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
|
||||
import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder;
|
||||
import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder.
|
||||
* The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf).
|
||||
*
|
||||
* Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and
|
||||
* so we must use reflection to detect which one (relocated or not) to use.
|
||||
*
|
||||
* Do not use this to process HBase's shaded protobuf messages. This is meant to process the
|
||||
* protobuf messages in HDFS for the asyncfs use case.
|
||||
* */
|
||||
* Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode
|
||||
* supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates
|
||||
* protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect
|
||||
* which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages.
|
||||
* This is meant to process the protobuf messages in HDFS for the asyncfs use case.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(ProtobufDecoder.class);
|
||||
private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class);
|
||||
|
||||
private static Class<?> protobufMessageLiteClass = null;
|
||||
private static Class<?> protobufMessageLiteBuilderClass = null;
|
||||
|
@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
|
|||
private Object parser;
|
||||
private Object builder;
|
||||
|
||||
|
||||
public ProtobufDecoder(Object prototype) {
|
||||
try {
|
||||
Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod(
|
||||
"getDefaultInstanceForType");
|
||||
Object prototype1 = getDefaultInstanceForTypeMethod
|
||||
.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));
|
||||
Method getDefaultInstanceForTypeMethod =
|
||||
protobufMessageLiteClass.getMethod("getDefaultInstanceForType");
|
||||
Object prototype1 =
|
||||
getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));
|
||||
|
||||
// parser = prototype.getParserForType()
|
||||
parser = getParserForTypeMethod.invoke(prototype1);
|
||||
parseFromMethod = parser.getClass().getMethod(
|
||||
"parseFrom", byte[].class, int.class, int.class);
|
||||
parseFromMethod =
|
||||
parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class);
|
||||
|
||||
// builder = prototype.newBuilderForType();
|
||||
builder = newBuilderForTypeMethod.invoke(prototype1);
|
||||
mergeFromMethod = builder.getClass().getMethod(
|
||||
"mergeFrom", byte[].class, int.class, int.class);
|
||||
mergeFromMethod =
|
||||
builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class);
|
||||
|
||||
// All protobuf message builders inherits from MessageLite.Builder
|
||||
buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build");
|
||||
|
@ -88,8 +83,7 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
|
|||
}
|
||||
}
|
||||
|
||||
protected void decode(
|
||||
ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
|
||||
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
|
||||
int length = msg.readableBytes();
|
||||
byte[] array;
|
||||
int offset;
|
||||
|
@ -122,8 +116,8 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {

try {
protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
protobufMessageLiteBuilderClass = Class.forName(
"org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
protobufMessageLiteBuilderClass =
Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
LOG.debug("Hadoop 3.3 and above shades protobuf.");
} catch (ClassNotFoundException e) {
LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e);
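For context, a minimal standalone sketch of the detection pattern the ProtobufDecoder hunks above rely on: probe for the relocated Hadoop 3.3+ MessageLite class first, then fall back to unshaded protobuf, and go through reflection so neither flavor is a compile-time dependency. Only the class names probed and the reflective method names come from the diff; the wrapper class and helper names below are illustrative, not part of HBase.

import java.lang.reflect.Method;

public final class ShadedProtobufProbe {
  private ShadedProtobufProbe() {
  }

  /** Returns whichever MessageLite class is actually on the classpath. */
  static Class<?> resolveMessageLiteClass() throws ClassNotFoundException {
    try {
      // Hadoop 3.3.0+ relocates protobuf into hadoop-thirdparty.
      return Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
    } catch (ClassNotFoundException e) {
      // Hadoop 3.2 and below ship unshaded protobuf.
      return Class.forName("com.google.protobuf.MessageLite");
    }
  }

  /** Parses a serialized message via reflection, mirroring the parseFrom lookup in the diff. */
  static Object parseFrom(Object prototype, byte[] data) throws Exception {
    Method getParser = prototype.getClass().getMethod("getParserForType");
    Object parser = getParser.invoke(prototype);
    Method parseFrom = parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class);
    return parseFrom.invoke(parser, data, 0, data.length);
  }
}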
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -22,7 +22,6 @@ import java.nio.ByteBuffer;
|
|||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
|
||||
|
@ -50,7 +49,7 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
|
|||
public WrapperAsyncFSOutput(Path file, FSDataOutputStream out) {
|
||||
this.out = out;
|
||||
this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true)
|
||||
.setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build());
|
||||
.setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -95,8 +94,8 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
|
|||
}
|
||||
long pos = out.getPos();
|
||||
/**
|
||||
* This flush0 method could only be called by single thread, so here we could
|
||||
* safely overwrite without any synchronization.
|
||||
* This flush0 method could only be called by single thread, so here we could safely overwrite
|
||||
* without any synchronization.
|
||||
*/
|
||||
this.syncedLength = pos;
|
||||
future.complete(pos);
|
||||
|
|
|
@ -56,24 +56,23 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
|
|||
private final int maxExcludeDNCount;
|
||||
private final Configuration conf;
|
||||
// This is a map of providerId->StreamSlowMonitor
|
||||
private final Map<String, StreamSlowMonitor> streamSlowMonitors =
|
||||
new ConcurrentHashMap<>(1);
|
||||
private final Map<String, StreamSlowMonitor> streamSlowMonitors = new ConcurrentHashMap<>(1);
|
||||
|
||||
public ExcludeDatanodeManager(Configuration conf) {
|
||||
this.conf = conf;
|
||||
this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
|
||||
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT);
|
||||
this.excludeDNsCache = CacheBuilder.newBuilder()
|
||||
.expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY,
|
||||
DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS)
|
||||
.maximumSize(this.maxExcludeDNCount)
|
||||
.build();
|
||||
.expireAfterWrite(
|
||||
this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
|
||||
TimeUnit.HOURS)
|
||||
.maximumSize(this.maxExcludeDNCount).build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to add a datanode to the regionserver excluding cache
|
||||
* @param datanodeInfo the datanode to be added to the excluded cache
|
||||
* @param cause the cause that the datanode is hope to be excluded
|
||||
* @param cause the cause that the datanode is hope to be excluded
|
||||
* @return True if the datanode is added to the regionserver excluding cache, false otherwise
|
||||
*/
|
||||
public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) {
|
||||
|
@ -85,15 +84,15 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
|
|||
datanodeInfo, cause, excludeDNsCache.size());
|
||||
return true;
|
||||
}
|
||||
LOG.debug("Try add datanode {} to exclude cache by [{}] failed, "
|
||||
+ "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet());
|
||||
LOG.debug(
|
||||
"Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}",
|
||||
datanodeInfo, cause, getExcludeDNs().keySet());
|
||||
return false;
|
||||
}
|
||||
|
||||
public StreamSlowMonitor getStreamSlowMonitor(String name) {
|
||||
String key = name == null || name.isEmpty() ? "defaultMonitorName" : name;
|
||||
return streamSlowMonitors
|
||||
.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
|
||||
return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
|
||||
}
|
||||
|
||||
public Map<DatanodeInfo, Long> getExcludeDNs() {
|
||||
|
@ -105,10 +104,12 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
for (StreamSlowMonitor monitor : streamSlowMonitors.values()) {
monitor.onConfigurationChange(conf);
}
this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite(
this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
TimeUnit.HOURS).maximumSize(this.conf
.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
this.excludeDNsCache = CacheBuilder.newBuilder()
.expireAfterWrite(
this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
TimeUnit.HOURS)
.maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
.build();
}
}
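The onConfigurationChange hunk above only rebuilds a size-bounded, TTL-expiring cache of excluded datanodes. A standalone sketch of that cache shape, using plain Guava rather than the relocated hbase-thirdparty package, with illustrative stand-ins for the WAL_* configuration values:

import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public final class ExcludeCacheSketch {
  public static void main(String[] args) {
    long ttlHours = 6;   // stand-in for the exclude-datanode TTL setting
    int maxEntries = 3;  // stand-in for the max excluded-datanode count

    Cache<String, Long> excludeDNsCache = CacheBuilder.newBuilder()
      .expireAfterWrite(ttlHours, TimeUnit.HOURS)
      .maximumSize(maxEntries)
      .build();

    // Entries fall out automatically once the TTL elapses or the size bound is hit,
    // so a slow datanode is only excluded for a bounded window.
    excludeDNsCache.put("dn-127.0.0.1:9866", System.currentTimeMillis());
    System.out.println(excludeDNsCache.asMap());
  }
}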
@ -38,18 +38,16 @@ import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
|
|||
import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
|
||||
|
||||
/**
|
||||
* Class for monitor the wal file flush performance.
|
||||
* Each active wal file has a StreamSlowMonitor.
|
||||
* Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class StreamSlowMonitor implements ConfigurationObserver {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class);
|
||||
|
||||
/**
|
||||
* Configure for the min count for a datanode detected slow.
|
||||
* If a datanode is detected slow times up to this count, then it will be added to the exclude
|
||||
* datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)}
|
||||
* of this regionsever.
|
||||
* Configure for the min count for a datanode detected slow. If a datanode is detected slow times
|
||||
* up to this count, then it will be added to the exclude datanode cache by
|
||||
* {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever.
|
||||
*/
|
||||
private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY =
|
||||
"hbase.regionserver.async.wal.min.slow.detect.count";
|
||||
|
@ -63,9 +61,9 @@ public class StreamSlowMonitor implements ConfigurationObserver {
|
|||
private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms
|
||||
|
||||
/**
|
||||
* Configure for the speed check of packet min length.
|
||||
* For packets whose data length smaller than this value, check slow by processing time.
|
||||
* While for packets whose data length larger than this value, check slow by flushing speed.
|
||||
* Configure for the speed check of packet min length. For packets whose data length smaller than
|
||||
* this value, check slow by processing time. While for packets whose data length larger than this
|
||||
* value, check slow by flushing speed.
|
||||
*/
|
||||
private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY =
|
||||
"hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min";
|
||||
|
@ -73,8 +71,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
|
|||
private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024;
|
||||
|
||||
/**
|
||||
* Configure for the slow packet process time, a duration from send to ACK.
|
||||
* The processing time check is for packets that data length smaller than
|
||||
* Configure for the slow packet process time, a duration from send to ACK. The processing time
|
||||
* check is for packets that data length smaller than
|
||||
* {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY}
|
||||
*/
|
||||
public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY =
|
||||
|
@ -105,15 +103,16 @@ public class StreamSlowMonitor implements ConfigurationObserver {
|
|||
private long minLengthForSpeedCheck;
|
||||
|
||||
public StreamSlowMonitor(Configuration conf, String name,
|
||||
ExcludeDatanodeManager excludeDatanodeManager) {
|
||||
ExcludeDatanodeManager excludeDatanodeManager) {
|
||||
setConf(conf);
|
||||
this.name = name;
|
||||
this.excludeDatanodeManager = excludeDatanodeManager;
|
||||
this.datanodeSlowDataQueue = CacheBuilder.newBuilder()
|
||||
.maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
|
||||
DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
|
||||
.expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY,
|
||||
DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS)
|
||||
.expireAfterWrite(
|
||||
conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
|
||||
TimeUnit.HOURS)
|
||||
.build(new CacheLoader<DatanodeInfo, Deque<PacketAckData>>() {
|
||||
@Override
|
||||
public Deque<PacketAckData> load(DatanodeInfo key) throws Exception {
|
||||
|
@ -129,30 +128,33 @@ public class StreamSlowMonitor implements ConfigurationObserver {
|
|||
|
||||
/**
|
||||
* Check if the packet process time shows that the relevant datanode is a slow node.
|
||||
* @param datanodeInfo the datanode that processed the packet
|
||||
* @param packetDataLen the data length of the packet (in bytes)
|
||||
* @param processTimeMs the process time (in ms) of the packet on the datanode,
|
||||
* @param datanodeInfo the datanode that processed the packet
|
||||
* @param packetDataLen the data length of the packet (in bytes)
|
||||
* @param processTimeMs the process time (in ms) of the packet on the datanode,
|
||||
* @param lastAckTimestamp the last acked timestamp of the packet on another datanode
|
||||
* @param unfinished if the packet is unfinished flushed to the datanode replicas
|
||||
* @param unfinished if the packet is unfinished flushed to the datanode replicas
|
||||
*/
|
||||
public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen,
|
||||
long processTimeMs, long lastAckTimestamp, int unfinished) {
|
||||
long processTimeMs, long lastAckTimestamp, int unfinished) {
|
||||
long current = EnvironmentEdgeManager.currentTime();
// Here are two conditions used to determine whether a datanode is slow,
// 1. For small packet, we just have a simple time limit, without considering
// the size of the packet.
// 2. For large packet, we will calculate the speed, and check if the speed is too slow.
boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || (
packetDataLen > minLengthForSpeedCheck
boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
|| (packetDataLen > minLengthForSpeedCheck
&& (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
if (slow) {
// Check if large diff ack timestamp between replicas,
// should try to avoid misjudgments that caused by GC STW.
if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || (
lastAckTimestamp <= 0 && unfinished == 0)) {
LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
+ "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs,
unfinished, lastAckTimestamp, this.name);
if (
(lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2)
|| (lastAckTimestamp <= 0 && unfinished == 0)
) {
LOG.info(
"Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
+ "lastAckTimestamp={}, monitor name: {}",
datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name);
if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) {
excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack");
}
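Restated outside the diff, the slow check above is a two-branch predicate: small packets are judged by ack latency alone, large packets by throughput. A hedged sketch with illustrative threshold values, not the HBase defaults:

public final class SlowPacketCheck {
  static final long MIN_LENGTH_FOR_SPEED_CHECK = 64 * 1024; // bytes
  static final long SLOW_PACKET_ACK_MS = 6000;              // ms
  static final double MIN_FLUSH_SPEED_KBS = 20.0;           // bytes per ms, roughly KB per second

  /** Small packets: flag on latency alone. Large packets: flag on flush throughput. */
  static boolean isSlow(long packetDataLen, long processTimeMs) {
    if (packetDataLen <= MIN_LENGTH_FOR_SPEED_CHECK) {
      return processTimeMs > SLOW_PACKET_ACK_MS;
    }
    return (double) packetDataLen / processTimeMs < MIN_FLUSH_SPEED_KBS;
  }

  public static void main(String[] args) {
    System.out.println(isSlow(4 * 1024, 7000));    // small packet, slow ack -> true
    System.out.println(isSlow(512 * 1024, 40000)); // large packet, roughly 13 bytes/ms -> true
  }
}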
@ -168,8 +170,10 @@ public class StreamSlowMonitor implements ConfigurationObserver {
private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) {
Deque<PacketAckData> slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo);
long current = EnvironmentEdgeManager.currentTime();
while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
|| slowDNQueue.size() >= minSlowDetectCount)) {
while (
!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
|| slowDNQueue.size() >= minSlowDetectCount)
) {
slowDNQueue.removeFirst();
}
slowDNQueue.addLast(new PacketAckData(dataLength, processTime));
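addSlowAckData above keeps a small per-datanode window of slow events, evicting entries that are stale or that would overflow the window before appending the new one. A standalone sketch of that sliding-window bookkeeping, with stand-in thresholds and names:

import java.util.ArrayDeque;
import java.util.Deque;

public final class SlowEventWindow {
  private final Deque<Long> window = new ArrayDeque<>();
  private final long ttlMs;
  private final int minCount;

  public SlowEventWindow(long ttlMs, int minCount) {
    this.ttlMs = ttlMs;
    this.minCount = minCount;
  }

  /** Returns true when, after recording this event, minCount recent events sit in the window. */
  public boolean addAndCheck(long nowMs) {
    while (!window.isEmpty() && (nowMs - window.getFirst() > ttlMs || window.size() >= minCount)) {
      window.removeFirst();
    }
    window.addLast(nowMs);
    return window.size() >= minCount;
  }

  public static void main(String[] args) {
    SlowEventWindow w = new SlowEventWindow(10 * 60 * 1000L, 3);
    long t = System.currentTimeMillis();
    System.out.println(w.addAndCheck(t));        // false, 1 event
    System.out.println(w.addAndCheck(t + 100));  // false, 2 events
    System.out.println(w.addAndCheck(t + 200));  // true, window full
  }
}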
@ -177,13 +181,13 @@ public class StreamSlowMonitor implements ConfigurationObserver {
|
|||
}
|
||||
|
||||
private void setConf(Configuration conf) {
|
||||
this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY,
|
||||
DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
|
||||
this.minSlowDetectCount =
|
||||
conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
|
||||
this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL);
|
||||
this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY,
|
||||
DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME);
|
||||
this.minLengthForSpeedCheck = conf.getLong(
|
||||
DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
|
||||
DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME);
|
||||
this.minLengthForSpeedCheck =
|
||||
conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
|
||||
DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH);
|
||||
this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY,
|
||||
DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED);
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/*
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -21,8 +20,8 @@ package org.apache.hadoop.hbase.util;
|
|||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
||||
/**
|
||||
* Similar interface as {@link org.apache.hadoop.util.Progressable} but returns
|
||||
* a boolean to support canceling the operation.
|
||||
* Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support
|
||||
* canceling the operation.
|
||||
* <p/>
|
||||
* Used for doing updating of OPENING znode during log replay on region open.
|
||||
*/
|
||||
|
@ -30,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
|
|||
public interface CancelableProgressable {
|
||||
|
||||
/**
|
||||
* Report progress. Returns true if operations should continue, false if the
|
||||
* operation should be canceled and rolled back.
|
||||
* Report progress. Returns true if operations should continue, false if the operation should be
|
||||
* canceled and rolled back.
|
||||
* @return whether to continue (true) or cancel (false) the operation
|
||||
*/
|
||||
boolean progress();
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -120,8 +120,10 @@ public final class RecoverLeaseFSUtils {
// Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
// isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
long localStartWaiting = EnvironmentEdgeManager.currentTime();
while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase *
nbAttempt) {
while (
(EnvironmentEdgeManager.currentTime() - localStartWaiting)
< subsequentPauseBase * nbAttempt
) {
Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
if (findIsFileClosedMeth) {
try {
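The reformatted loop above waits for a window that grows with the attempt number, sleeping between polls and bailing out early once the file reports closed. A compact sketch of that pattern; isFileClosed() here is a stand-in callback rather than the reflective DistributedFileSystem call HBase actually makes:

final class LeaseRecoveryWait {
  interface FileClosedCheck {
    boolean isFileClosed();
  }

  /** Waits up to pauseBaseMs * attempt, polling every pollMs; returns true if closed early. */
  static boolean waitForClose(FileClosedCheck check, int attempt, long pauseBaseMs, long pollMs)
    throws InterruptedException {
    long start = System.currentTimeMillis();
    while ((System.currentTimeMillis() - start) < pauseBaseMs * attempt) {
      Thread.sleep(pollMs);
      if (check.isFileClosed()) {
        return true;
      }
    }
    return false;
  }
}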
@ -152,10 +154,10 @@ public final class RecoverLeaseFSUtils {
|
|||
private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout,
|
||||
final int nbAttempt, final Path p, final long startWaiting) {
|
||||
if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) {
|
||||
LOG.warn("Cannot recoverLease after trying for " +
|
||||
conf.getInt("hbase.lease.recovery.timeout", 900000) +
|
||||
"ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " +
|
||||
getLogMessageDetail(nbAttempt, p, startWaiting));
|
||||
LOG.warn("Cannot recoverLease after trying for "
|
||||
+ conf.getInt("hbase.lease.recovery.timeout", 900000)
|
||||
+ "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; "
|
||||
+ getLogMessageDetail(nbAttempt, p, startWaiting));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -170,8 +172,8 @@ public final class RecoverLeaseFSUtils {
|
|||
boolean recovered = false;
|
||||
try {
|
||||
recovered = dfs.recoverLease(p);
|
||||
LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") +
|
||||
getLogMessageDetail(nbAttempt, p, startWaiting));
|
||||
LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ")
|
||||
+ getLogMessageDetail(nbAttempt, p, startWaiting));
|
||||
} catch (IOException e) {
|
||||
if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
|
||||
// This exception comes out instead of FNFE, fix it
|
||||
|
@ -189,8 +191,8 @@ public final class RecoverLeaseFSUtils {
|
|||
*/
|
||||
private static String getLogMessageDetail(final int nbAttempt, final Path p,
|
||||
final long startWaiting) {
|
||||
return "attempt=" + nbAttempt + " on file=" + p + " after " +
|
||||
(EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
|
||||
return "attempt=" + nbAttempt + " on file=" + p + " after "
|
||||
+ (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.asyncfs;
|
|||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HBaseClassTestRule;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
|
@ -44,19 +45,15 @@ public class TestExcludeDatanodeManager {
|
|||
StreamSlowMonitor streamSlowDNsMonitor =
|
||||
excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
|
||||
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
|
||||
DatanodeInfo datanodeInfo =
|
||||
new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1")
|
||||
.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333)
|
||||
.setIpcPort(444).setNetworkLocation("location1").build();
|
||||
streamSlowDNsMonitor
|
||||
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
|
||||
System.currentTimeMillis() - 5100, 0);
|
||||
streamSlowDNsMonitor
|
||||
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
|
||||
System.currentTimeMillis() - 5100, 0);
|
||||
streamSlowDNsMonitor
|
||||
.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
|
||||
System.currentTimeMillis() - 5100, 0);
|
||||
DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
|
||||
.setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
|
||||
.setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
|
||||
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
|
||||
System.currentTimeMillis() - 5100, 0);
|
||||
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
|
||||
System.currentTimeMillis() - 5100, 0);
|
||||
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
|
||||
System.currentTimeMillis() - 5100, 0);
|
||||
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
|
||||
assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
|
||||
}
|
||||
|
@ -68,19 +65,15 @@ public class TestExcludeDatanodeManager {
|
|||
StreamSlowMonitor streamSlowDNsMonitor =
|
||||
excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
|
||||
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
|
||||
DatanodeInfo datanodeInfo =
|
||||
new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1")
|
||||
.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333)
|
||||
.setIpcPort(444).setNetworkLocation("location1").build();
|
||||
streamSlowDNsMonitor
|
||||
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
|
||||
System.currentTimeMillis() - 7000, 0);
|
||||
streamSlowDNsMonitor
|
||||
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
|
||||
System.currentTimeMillis() - 7000, 0);
|
||||
streamSlowDNsMonitor
|
||||
.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
|
||||
System.currentTimeMillis() - 7000, 0);
|
||||
DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
|
||||
.setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
|
||||
.setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
|
||||
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
|
||||
System.currentTimeMillis() - 7000, 0);
|
||||
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
|
||||
System.currentTimeMillis() - 7000, 0);
|
||||
streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
|
||||
System.currentTimeMillis() - 7000, 0);
|
||||
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
|
||||
assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -57,6 +57,7 @@ import org.junit.experimental.categories.Category;
|
|||
import org.junit.rules.TestName;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
|
||||
import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
|
||||
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
|
||||
|
@ -240,9 +241,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
|
|||
StreamSlowMonitor streamSlowDNsMonitor =
|
||||
excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
|
||||
assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
|
||||
try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS,
|
||||
f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop,
|
||||
CHANNEL_CLASS, streamSlowDNsMonitor)) {
|
||||
try (FanOutOneBlockAsyncDFSOutput output =
|
||||
FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3,
|
||||
FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) {
|
||||
// should exclude the dead dn when retry so here we only have 2 DNs in pipeline
|
||||
assertEquals(2, output.getPipeline().length);
|
||||
assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -47,6 +47,7 @@ import org.junit.experimental.categories.Category;
|
|||
import org.junit.rules.TestName;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
|
||||
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
|
||||
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
|
||||
|
@ -70,10 +71,10 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {
|
|||
|
||||
@ClassRule
|
||||
public static final HBaseClassTestRule CLASS_RULE =
|
||||
HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class);
|
||||
HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class);
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class);
|
||||
LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class);
|
||||
|
||||
private static DistributedFileSystem FS;
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -31,7 +31,7 @@ public class TestSendBufSizePredictor {
|
|||
|
||||
@ClassRule
|
||||
public static final HBaseClassTestRule CLASS_RULE =
|
||||
HBaseClassTestRule.forClass(TestSendBufSizePredictor.class);
|
||||
HBaseClassTestRule.forClass(TestSendBufSizePredictor.class);
|
||||
|
||||
@Test
|
||||
public void test() {
|
||||
|
|
|
@ -110,9 +110,9 @@ public final class HBaseKerberosUtils {
|
|||
|
||||
/**
|
||||
* Set up configuration for a secure HDFS+HBase cluster.
|
||||
* @param conf configuration object.
|
||||
* @param conf configuration object.
|
||||
* @param servicePrincipal service principal used by NN, HM and RS.
|
||||
* @param spnegoPrincipal SPNEGO principal used by NN web UI.
|
||||
* @param spnegoPrincipal SPNEGO principal used by NN web UI.
|
||||
*/
|
||||
public static void setSecuredConfiguration(Configuration conf, String servicePrincipal,
|
||||
String spnegoPrincipal) {
|
||||
|
@ -156,7 +156,7 @@ public final class HBaseKerberosUtils {
|
|||
/**
|
||||
* Set up SSL configuration for HDFS NameNode and DataNode.
|
||||
* @param utility a HBaseTestingUtility object.
|
||||
* @param clazz the caller test class.
|
||||
* @param clazz the caller test class.
|
||||
* @throws Exception if unable to set up SSL configuration
|
||||
*/
|
||||
public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class<?> clazz)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util;
|
|||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HBaseClassTestRule;
|
||||
|
@ -69,8 +68,8 @@ public class TestRecoverLeaseFSUtils {
|
|||
Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
|
||||
// Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
|
||||
// invocations will happen pretty fast... the we fall into the longer wait loop).
|
||||
assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 *
|
||||
HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
|
||||
assertTrue((EnvironmentEdgeManager.currentTime() - startTime)
|
||||
> (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
<?xml version="1.0"?>
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<!--
|
||||
/**
|
||||
|
@ -21,34 +21,14 @@
|
|||
-->
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<artifactId>hbase-build-configuration</artifactId>
|
||||
<groupId>org.apache.hbase</groupId>
|
||||
<artifactId>hbase-build-configuration</artifactId>
|
||||
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
||||
<relativePath>../hbase-build-configuration</relativePath>
|
||||
</parent>
|
||||
<artifactId>hbase-backup</artifactId>
|
||||
<name>Apache HBase - Backup</name>
|
||||
<description>Backup for HBase</description>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<!--Make it so assembly:single does nothing in here-->
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<configuration>
|
||||
<skipAssembly>true</skipAssembly>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<!-- Make a jar and put the sources in the jar -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>net.revelc.code</groupId>
|
||||
<artifactId>warbucks-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
<dependencies>
|
||||
<!-- Intra-project dependencies -->
|
||||
<dependency>
|
||||
|
@ -173,12 +153,34 @@
|
|||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<!--Make it so assembly:single does nothing in here-->
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<configuration>
|
||||
<skipAssembly>true</skipAssembly>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<!-- Make a jar and put the sources in the jar -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>net.revelc.code</groupId>
|
||||
<artifactId>warbucks-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
<profiles>
|
||||
<!-- Profile for building against Hadoop 3.0.0. Activate by default -->
|
||||
<profile>
|
||||
<id>hadoop-3.0</id>
|
||||
<activation>
|
||||
<property><name>!hadoop.profile</name></property>
|
||||
<property>
|
||||
<name>!hadoop.profile</name>
|
||||
</property>
|
||||
</activation>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
|
@ -213,8 +215,7 @@
|
|||
<artifactId>lifecycle-mapping</artifactId>
|
||||
<configuration>
|
||||
<lifecycleMappingMetadata>
|
||||
<pluginExecutions>
|
||||
</pluginExecutions>
|
||||
<pluginExecutions/>
|
||||
</lifecycleMappingMetadata>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,13 +15,11 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.backup.util.BackupSet;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
@ -30,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience;
|
|||
* The administrative API for HBase Backup. Construct an instance and call {@link #close()}
|
||||
* afterwards.
|
||||
* <p>
|
||||
* BackupAdmin can be used to create backups, restore data from backups and for other
|
||||
* backup-related operations.
|
||||
* BackupAdmin can be used to create backups, restore data from backups and for other backup-related
|
||||
* operations.
|
||||
* @since 2.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
|
@ -71,9 +69,9 @@ public interface BackupAdmin extends Closeable {
|
|||
|
||||
/**
|
||||
* Merge backup images command
|
||||
* @param backupIds array of backup ids of images to be merged
|
||||
* The resulting backup image will have the same backup id as the most
|
||||
* recent image from a list of images to be merged
|
||||
* @param backupIds array of backup ids of images to be merged The resulting backup image will
|
||||
* have the same backup id as the most recent image from a list of images to be
|
||||
* merged
|
||||
* @throws IOException exception
|
||||
*/
|
||||
void mergeBackups(String[] backupIds) throws IOException;
|
||||
|
@ -120,7 +118,7 @@ public interface BackupAdmin extends Closeable {
|
|||
|
||||
/**
|
||||
* Add tables to backup set command
|
||||
* @param name name of backup set.
|
||||
* @param name name of backup set.
|
||||
* @param tables array of tables to be added to this set.
|
||||
* @throws IOException exception
|
||||
*/
|
||||
|
@ -128,7 +126,7 @@ public interface BackupAdmin extends Closeable {
|
|||
|
||||
/**
|
||||
* Remove tables from backup set
|
||||
* @param name name of backup set.
|
||||
* @param name name of backup set.
|
||||
* @param tables array of tables to be removed from this set.
|
||||
* @throws IOException exception
|
||||
*/
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -18,13 +18,11 @@
|
|||
package org.apache.hadoop.hbase.backup;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
|
||||
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
|
||||
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
|
||||
import org.apache.hadoop.hbase.client.Connection;
|
||||
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,11 +15,9 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configurable;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.backup.impl.BackupManager;
|
||||
|
@ -34,16 +32,16 @@ import org.apache.yetus.audience.InterfaceAudience;
|
|||
public interface BackupCopyJob extends Configurable {
|
||||
/**
|
||||
* Copy backup data to destination
|
||||
* @param backupInfo context object
|
||||
* @param backupInfo context object
|
||||
* @param backupManager backup manager
|
||||
* @param conf configuration
|
||||
* @param backupType backup type (FULL or INCREMENTAL)
|
||||
* @param options array of options (implementation-specific)
|
||||
* @param conf configuration
|
||||
* @param backupType backup type (FULL or INCREMENTAL)
|
||||
* @param options array of options (implementation-specific)
|
||||
* @return result (0 - success, -1 failure )
|
||||
* @throws IOException exception
|
||||
*/
|
||||
int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
|
||||
BackupType backupType, String[] options) throws IOException;
|
||||
BackupType backupType, String[] options) throws IOException;
|
||||
|
||||
/**
|
||||
* Cancel copy job
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -58,9 +58,7 @@ import org.slf4j.LoggerFactory;
|
|||
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
|
||||
|
||||
/**
|
||||
*
|
||||
* Command-line entry point for backup operation
|
||||
*
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class BackupDriver extends AbstractHBaseTool {
|
||||
|
|
|
@ -23,7 +23,6 @@ import java.util.HashSet;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -54,7 +53,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
|
|||
private Connection connection;
|
||||
private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
|
||||
secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
|
||||
//used by unit test to skip reading backup:system
|
||||
// used by unit test to skip reading backup:system
|
||||
private boolean checkForFullyBackedUpTables = true;
|
||||
private List<TableName> fullyBackedUpTables = null;
|
||||
|
||||
|
@ -79,8 +78,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
|
|||
connection = ConnectionFactory.createConnection(conf);
|
||||
}
|
||||
try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
|
||||
Map<byte[], List<Path>>[] res =
|
||||
tbl.readBulkLoadedFiles(null, tableList);
|
||||
Map<byte[], List<Path>>[] res = tbl.readBulkLoadedFiles(null, tableList);
|
||||
secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
|
||||
prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
|
||||
return getFilenameFromBulkLoad(res);
|
||||
|
@ -91,6 +89,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
|
|||
void setCheckForFullyBackedUpTables(boolean b) {
|
||||
checkForFullyBackedUpTables = b;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
|
||||
if (conf == null) {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
|
||||
|
@ -59,7 +59,10 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
* Backup session states
|
||||
*/
|
||||
public enum BackupState {
|
||||
RUNNING, COMPLETE, FAILED, ANY
|
||||
RUNNING,
|
||||
COMPLETE,
|
||||
FAILED,
|
||||
ANY
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -67,7 +70,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
* BackupState.RUNNING
|
||||
*/
|
||||
public enum BackupPhase {
|
||||
REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST
|
||||
REQUEST,
|
||||
SNAPSHOT,
|
||||
PREPARE_INCREMENTAL,
|
||||
SNAPSHOTCOPY,
|
||||
INCREMENTAL_COPY,
|
||||
STORE_MANIFEST
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -137,8 +145,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
private Map<TableName, Map<String, Long>> tableSetTimestampMap;
|
||||
|
||||
/**
|
||||
* Previous Region server log timestamps for table set after distributed log roll key -
|
||||
* table name, value - map of RegionServer hostname -> last log rolled timestamp
|
||||
* Previous Region server log timestamps for table set after distributed log roll key - table
|
||||
* name, value - map of RegionServer hostname -> last log rolled timestamp
|
||||
*/
|
||||
private Map<TableName, Map<String, Long>> incrTimestampMap;
|
||||
|
||||
|
@ -198,8 +206,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
return tableSetTimestampMap;
|
||||
}
|
||||
|
||||
public void setTableSetTimestampMap(Map<TableName,
|
||||
Map<String, Long>> tableSetTimestampMap) {
|
||||
public void setTableSetTimestampMap(Map<TableName, Map<String, Long>> tableSetTimestampMap) {
|
||||
this.tableSetTimestampMap = tableSetTimestampMap;
|
||||
}
|
||||
|
||||
|
@ -357,8 +364,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
* Set the new region server log timestamps after distributed log roll
|
||||
* @param prevTableSetTimestampMap table timestamp map
|
||||
*/
|
||||
public void setIncrTimestampMap(Map<TableName,
|
||||
Map<String, Long>> prevTableSetTimestampMap) {
|
||||
public void setIncrTimestampMap(Map<TableName, Map<String, Long>> prevTableSetTimestampMap) {
|
||||
this.incrTimestampMap = prevTableSetTimestampMap;
|
||||
}
|
||||
|
||||
|
@ -482,8 +488,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name()));
|
||||
}
|
||||
|
||||
context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(),
|
||||
proto.getBackupId()));
|
||||
context
|
||||
.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), proto.getBackupId()));
|
||||
|
||||
if (proto.hasBackupPhase()) {
|
||||
context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name()));
|
||||
|
@ -507,12 +513,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
return map;
|
||||
}
|
||||
|
||||
private static Map<TableName, Map<String, Long>> getTableSetTimestampMap(
|
||||
Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
|
||||
private static Map<TableName, Map<String, Long>>
|
||||
getTableSetTimestampMap(Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
|
||||
Map<TableName, Map<String, Long>> tableSetTimestampMap = new HashMap<>();
|
||||
for (Entry<String, BackupProtos.BackupInfo.RSTimestampMap> entry : map.entrySet()) {
|
||||
tableSetTimestampMap
|
||||
.put(TableName.valueOf(entry.getKey()), entry.getValue().getRsTimestampMap());
|
||||
tableSetTimestampMap.put(TableName.valueOf(entry.getKey()),
|
||||
entry.getValue().getRsTimestampMap());
|
||||
}
|
||||
|
||||
return tableSetTimestampMap;
|
||||
|
@ -549,7 +555,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
|
|||
public String getStatusAndProgressAsString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("id: ").append(getBackupId()).append(" state: ").append(getState())
|
||||
.append(" progress: ").append(getProgress());
|
||||
.append(" progress: ").append(getProgress());
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
|
@ -567,7 +573,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
@Override
public int compareTo(BackupInfo o) {
Long thisTS =
Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
return thisTS.compareTo(otherTS);
}
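As the compareTo hunk above shows, backup ids are ordered by the numeric timestamp after the last underscore. A standalone comparator over plain id strings; the example id format and class name are illustrative only:

import java.util.Comparator;

public final class BackupIdOrder {
  /** Extracts the trailing "_<millis>" suffix as a long. */
  static long timestampOf(String backupId) {
    return Long.parseLong(backupId.substring(backupId.lastIndexOf('_') + 1));
  }

  public static final Comparator<String> BY_TIMESTAMP =
    Comparator.comparingLong(BackupIdOrder::timestampOf);

  public static void main(String[] args) {
    System.out.println(BY_TIMESTAMP.compare("backup_1700000000000", "backup_1700000001000")); // < 0
  }
}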
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,11 +15,9 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configurable;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
||||
|
@ -32,7 +30,6 @@ import org.apache.yetus.audience.InterfaceAudience;
|
|||
public interface BackupMergeJob extends Configurable {
|
||||
/**
|
||||
* Run backup merge operation.
|
||||
*
|
||||
* @param backupIds backup image ids
|
||||
* @throws IOException if the backup merge operation fails
|
||||
*/
|
||||
|
|
|
@@ -7,14 +7,13 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and
* under the License.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
@@ -22,7 +21,6 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -56,7 +54,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
@Override
public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths)
throws IOException {
throws IOException {
Configuration cfg = ctx.getEnvironment().getConfiguration();
if (finalPaths == null) {
// there is no need to record state
@@ -67,7 +65,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
return;
}
try (Connection connection = ConnectionFactory.createConnection(cfg);
BackupSystemTable tbl = new BackupSystemTable(connection)) {
BackupSystemTable tbl = new BackupSystemTable(connection)) {
List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
RegionInfo info = ctx.getEnvironment().getRegionInfo();
TableName tableName = info.getTable();
@@ -82,16 +80,17 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
LOG.error("Failed to get tables which have been fully backed up", ioe);
}
}
@Override
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
Configuration cfg = ctx.getEnvironment().getConfiguration();
if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled");
return;
}
try (Connection connection = ConnectionFactory.createConnection(cfg);
BackupSystemTable tbl = new BackupSystemTable(connection)) {
BackupSystemTable tbl = new BackupSystemTable(connection)) {
List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
RegionInfo info = ctx.getEnvironment().getRegionInfo();
TableName tableName = info.getTable();

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.HConstants;
@@ -45,14 +44,14 @@ public interface BackupRestoreConstants {
int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000;
/*
* Drivers option list
* Drivers option list
*/
String OPTION_OVERWRITE = "o";
String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists";
String OPTION_CHECK = "c";
String OPTION_CHECK_DESC =
"Check restore sequence and dependencies only (does not execute the command)";
"Check restore sequence and dependencies only (does not execute the command)";
String OPTION_SET = "s";
String OPTION_SET_DESC = "Backup set name";
@@ -62,8 +61,8 @@ public interface BackupRestoreConstants {
String OPTION_DEBUG_DESC = "Enable debug loggings";
String OPTION_TABLE = "t";
String OPTION_TABLE_DESC = "Table name. If specified, only backup images,"
+ " which contain this table will be listed.";
String OPTION_TABLE_DESC =
"Table name. If specified, only backup images," + " which contain this table will be listed.";
String OPTION_LIST = "l";
String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
@@ -84,37 +83,32 @@ public interface BackupRestoreConstants {
String OPTION_KEEP = "k";
String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete";
String OPTION_TABLE_MAPPING = "m";
String OPTION_TABLE_MAPPING_DESC =
"A comma separated list of target tables. "
+ "If specified, each table in <tables> must have a mapping";
String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. "
+ "If specified, each table in <tables> must have a mapping";
String OPTION_YARN_QUEUE_NAME = "q";
String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on";
String JOB_NAME_CONF_KEY = "mapreduce.job.name";
String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY
+ "=true\n"
+ "hbase.master.logcleaner.plugins="
+"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+ "hbase.procedure.master.classes=YOUR_CLASSES,"
+"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+ "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+ "hbase.coprocessor.region.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.BackupObserver\n"
+ "and restart the cluster\n"
+ "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "+
"in hbase-site.xml, set:\n "
+ BACKUP_CONFIG_STRING;
String BACKUP_CONFIG_STRING =
BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins="
+ "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+ "hbase.procedure.master.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+ "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+ "hbase.coprocessor.region.classes=YOUR_CLASSES,"
+ "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n"
+ "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n "
+ BACKUP_CONFIG_STRING;
String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING;
/*
* Delimiter in table name list in restore command
* Delimiter in table name list in restore command
*/
String TABLENAME_DELIMITER_IN_COMMAND = ",";
@@ -123,7 +117,24 @@ public interface BackupRestoreConstants {
String BACKUPID_PREFIX = "backup_";
enum BackupCommand {
CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS,
SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR
CREATE,
CANCEL,
DELETE,
DESCRIBE,
HISTORY,
STATUS,
CONVERT,
MERGE,
STOP,
SHOW,
HELP,
PROGRESS,
SET,
SET_ADD,
SET_REMOVE,
SET_DELETE,
SET_DESCRIBE,
SET_LIST,
REPAIR
}
}

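BACKUP_CONFIG_STRING above doubles as the list of settings an operator has to add to hbase-site.xml before any backup command will run. A hedged sketch of the same settings applied to a Configuration in Java (only BACKUP_ENABLE_KEY comes from the interface; the other key names are quoted verbatim from the string above, and the values drop the YOUR_PLUGINS/YOUR_CLASSES placeholders):

// Sketch only: on a real cluster, append to existing plugin/class lists instead of replacing them.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
conf.set("hbase.master.logcleaner.plugins",
  "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
conf.set("hbase.procedure.master.classes",
  "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
conf.set("hbase.procedure.regionserver.classes",
  "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
conf.set("hbase.coprocessor.region.classes",
  "org.apache.hadoop.hbase.backup.BackupObserver");
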
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -26,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience;
/**
* Factory implementation for backup/restore related jobs
*
*/
@InterfaceAudience.Private
public final class BackupRestoreFactory {
@@ -45,7 +44,7 @@ public final class BackupRestoreFactory {
*/
public static RestoreJob getRestoreJob(Configuration conf) {
Class<? extends RestoreJob> cls =
conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
RestoreJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
@@ -57,9 +56,8 @@ public final class BackupRestoreFactory {
* @return backup copy job instance
*/
public static BackupCopyJob getBackupCopyJob(Configuration conf) {
Class<? extends BackupCopyJob> cls =
conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class,
BackupCopyJob.class);
Class<? extends BackupCopyJob> cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS,
MapReduceBackupCopyJob.class, BackupCopyJob.class);
BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
@@ -71,9 +69,8 @@ public final class BackupRestoreFactory {
* @return backup merge job instance
*/
public static BackupMergeJob getBackupMergeJob(Configuration conf) {
Class<? extends BackupMergeJob> cls =
conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class,
BackupMergeJob.class);
Class<? extends BackupMergeJob> cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS,
MapReduceBackupMergeJob.class, BackupMergeJob.class);
BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;

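Each factory method above resolves its implementation class from a configuration key and falls back to the MapReduce-based default, so a different job implementation can be swapped in through the Configuration. A minimal sketch, assuming the HBASE_INCR_RESTORE_IMPL_CLASS constant is accessible to callers and MyRestoreJob is a hypothetical RestoreJob implementation:

// Sketch only: MyRestoreJob is hypothetical; the default would be MapReduceRestoreJob.
Configuration conf = HBaseConfiguration.create();
conf.setClass(BackupRestoreFactory.HBASE_INCR_RESTORE_IMPL_CLASS,
  MyRestoreJob.class, RestoreJob.class);
RestoreJob job = BackupRestoreFactory.getRestoreJob(conf); // returns a MyRestoreJob instance
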
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,11 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
@@ -29,14 +29,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
*/
@InterfaceAudience.Private
public class BackupTableInfo {
public class BackupTableInfo {
/*
* Table name for backup
* Table name for backup
*/
private TableName table;
/*
* Snapshot name for offline/online snapshot
* Snapshot name for offline/online snapshot
*/
private String snapshotName = null;

@@ -1,14 +1,13 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -52,15 +49,15 @@ public final class HBackupFileSystem {
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
* @param backupRootDir backup root directory
* @param backupId backup id
* @param tableName table name
* @param backupId backup id
* @param tableName table name
* @return backupPath String for the particular table
*/
public static String
getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
public static String getTableBackupDir(String backupRootDir, String backupId,
TableName tableName) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
+ Path.SEPARATOR;
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
+ Path.SEPARATOR;
}
/**
@@ -75,7 +72,7 @@ public final class HBackupFileSystem {
/**
* Get backup tmp directory for backupId
* @param backupRoot backup root
* @param backupId backup id
* @param backupId backup id
* @return backup tmp directory path
*/
public static Path getBackupTmpDirPathForBackupId(String backupRoot, String backupId) {
@@ -83,7 +80,7 @@ public final class HBackupFileSystem {
}
public static String getTableBackupDataDir(String backupRootDir, String backupId,
TableName tableName) {
TableName tableName) {
return getTableBackupDir(backupRootDir, backupId, tableName) + Path.SEPARATOR + "data";
}
@@ -97,8 +94,8 @@ public final class HBackupFileSystem {
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
* @param backupRootPath backup root path
* @param tableName table name
* @param backupId backup Id
* @param tableName table name
* @param backupId backup Id
* @return backupPath for the particular table
*/
public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
@@ -109,12 +106,12 @@ public final class HBackupFileSystem {
* Given the backup root dir and the backup id, return the log file location for an incremental
* backup.
* @param backupRootDir backup root directory
* @param backupId backup id
* @param backupId backup id
* @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738"
*/
public static String getLogBackupDir(String backupRootDir, String backupId) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ HConstants.HREGION_LOGDIR_NAME;
+ HConstants.HREGION_LOGDIR_NAME;
}
public static Path getLogBackupPath(String backupRootDir, String backupId) {
@@ -124,37 +121,35 @@ public final class HBackupFileSystem {
// TODO we do not keep WAL files anymore
// Move manifest file to other place
private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
throws IOException {
throws IOException {
FileSystem fs = backupRootPath.getFileSystem(conf);
Path manifestPath =
new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
+ BackupManifest.MANIFEST_FILE_NAME);
Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
+ BackupManifest.MANIFEST_FILE_NAME);
if (!fs.exists(manifestPath)) {
String errorMsg =
"Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for "
+ backupId + ". File " + manifestPath + " does not exists. Did " + backupId
+ " correspond to previously taken backup ?";
String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME
+ " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId
+ " correspond to previously taken backup ?";
throw new IOException(errorMsg);
}
return manifestPath;
}
public static BackupManifest
getManifest(Configuration conf, Path backupRootPath, String backupId) throws IOException {
public static BackupManifest getManifest(Configuration conf, Path backupRootPath, String backupId)
throws IOException {
BackupManifest manifest =
new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId));
new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId));
return manifest;
}
/**
* Check whether the backup image path and there is manifest file in the path.
* @param backupManifestMap If all the manifests are found, then they are put into this map
* @param tableArray the tables involved
* @param tableArray the tables involved
* @throws IOException exception
*/
public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId)
throws IOException {
TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId)
throws IOException {
for (TableName tableName : tableArray) {
BackupManifest manifest = getManifest(conf, backupRootPath, backupId);
backupManifestMap.put(tableName, manifest);

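The javadoc above spells out the resulting directory layout, so the helper can be exercised directly. A small sketch that reproduces the documented example path (the values echo the javadoc; the table is assumed to live in the default namespace):

// Sketch only: mirrors the example path from the javadoc above.
String dir = HBackupFileSystem.getTableBackupDir(
  "hdfs://backup.hbase.org:9000/user/biadmin/backup",
  "backup_1396650096738", TableName.valueOf("t1_dn"));
// dir -> "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/"
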
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -59,9 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
/**
*
* Command-line entry point for restore operation
*
*/
@InterfaceAudience.Private
public class RestoreDriver extends AbstractHBaseTool {
@@ -69,10 +67,10 @@ public class RestoreDriver extends AbstractHBaseTool {
private CommandLine cmd;
private static final String USAGE_STRING =
"Usage: hbase restore <backup_path> <backup_id> [options]\n"
+ " backup_path Path to a backup destination root\n"
+ " backup_id Backup image ID to restore\n"
+ " table(s) Comma-separated list of tables to restore\n";
"Usage: hbase restore <backup_path> <backup_id> [options]\n"
+ " backup_path Path to a backup destination root\n"
+ " backup_id Backup image ID to restore\n"
+ " table(s) Comma-separated list of tables to restore\n";
private static final String USAGE_FOOTER = "";
@@ -101,19 +99,19 @@ public class RestoreDriver extends AbstractHBaseTool {
boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
if (overwrite) {
LOG.debug("Found -overwrite option in restore command, "
+ "will overwrite to existing table if any in the restore target");
+ "will overwrite to existing table if any in the restore target");
}
// whether to only check the dependencies, false by default
boolean check = cmd.hasOption(OPTION_CHECK);
if (check) {
LOG.debug("Found -check option in restore command, "
+ "will check and verify the dependencies");
LOG.debug(
"Found -check option in restore command, " + "will check and verify the dependencies");
}
if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) {
System.err.println("Options -s and -t are mutaully exclusive,"+
" you can not specify both of them.");
System.err.println(
"Options -s and -t are mutaully exclusive," + " you can not specify both of them.");
printToolUsage();
return -1;
}
@@ -141,9 +139,9 @@ public class RestoreDriver extends AbstractHBaseTool {
String backupId = remainArgs[1];
String tables;
String tableMapping =
cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
try (final Connection conn = ConnectionFactory.createConnection(conf);
BackupAdmin client = new BackupAdminImpl(conn)) {
BackupAdmin client = new BackupAdminImpl(conn)) {
// Check backup set
if (cmd.hasOption(OPTION_SET)) {
String setName = cmd.getOptionValue(OPTION_SET);
@@ -155,8 +153,8 @@ public class RestoreDriver extends AbstractHBaseTool {
return -2;
}
if (tables == null) {
System.out.println("ERROR: Backup set '" + setName
+ "' is either empty or does not exist");
System.out
.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
printToolUsage();
return -3;
}
@@ -167,15 +165,16 @@ public class RestoreDriver extends AbstractHBaseTool {
TableName[] sTableArray = BackupUtils.parseTableNames(tables);
TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping);
if (sTableArray != null && tTableArray != null &&
(sTableArray.length != tTableArray.length)) {
if (
sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)
) {
System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
printToolUsage();
return -4;
}
client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check,
sTableArray, tTableArray, overwrite));
client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray,
tTableArray, overwrite));
} catch (Exception e) {
LOG.error("Error while running restore backup", e);
return -5;
@@ -184,7 +183,7 @@ public class RestoreDriver extends AbstractHBaseTool {
}
private String getTablesForSet(Connection conn, String name, Configuration conf)
throws IOException {
throws IOException {
try (final BackupSystemTable table = new BackupSystemTable(conn)) {
List<TableName> tables = table.describeBackupSet(name);

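The driver ultimately funnels its parsed options into BackupUtils.createRestoreRequest and BackupAdmin.restore, so the same restore can be issued programmatically. A hedged sketch built only from the calls visible above (paths, ids and table names are placeholders):

// Sketch only: conf is an existing HBase Configuration; all literal values are placeholders.
try (Connection conn = ConnectionFactory.createConnection(conf);
    BackupAdmin client = new BackupAdminImpl(conn)) {
  TableName[] fromTables = BackupUtils.parseTableNames("t1");
  TableName[] toTables = BackupUtils.parseTableNames("t1_restored");
  client.restore(BackupUtils.createRestoreRequest(
    "hdfs://backup.hbase.org:9000/user/biadmin/backup", // backup root
    "backup_1396650096738",                             // backup id
    false,                                              // check dependencies only?
    fromTables, toTables, true));                       // overwrite existing tables
}
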
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
@@ -34,12 +32,12 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface RestoreJob extends Configurable {
/**
* Run restore operation
* @param dirPaths path array of WAL log directories
* @param fromTables from tables
* @param toTables to tables
* @param dirPaths path array of WAL log directories
* @param fromTables from tables
* @param toTables to tables
* @param fullBackupRestore full backup restore
* @throws IOException if running the job fails
*/
void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
boolean fullBackupRestore) throws IOException;
void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore)
throws IOException;
}

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -25,7 +25,6 @@ import java.util.HashSet;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -57,7 +56,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
|
|||
public class BackupAdminImpl implements BackupAdmin {
|
||||
public final static String CHECK_OK = "Checking backup images: OK";
|
||||
public final static String CHECK_FAILED =
|
||||
"Checking backup images: Failed. Some dependencies are missing for restore";
|
||||
"Checking backup images: Failed. Some dependencies are missing for restore";
|
||||
private static final Logger LOG = LoggerFactory.getLogger(BackupAdminImpl.class);
|
||||
|
||||
private final Connection conn;
|
||||
|
@ -107,8 +106,8 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
deleteSessionStarted = true;
|
||||
} catch (IOException e) {
|
||||
LOG.warn("You can not run delete command while active backup session is in progress. \n"
|
||||
+ "If there is no active backup session running, run backup repair utility to "
|
||||
+ "restore \nbackup system integrity.");
|
||||
+ "If there is no active backup session running, run backup repair utility to "
|
||||
+ "restore \nbackup system integrity.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -158,7 +157,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
BackupSystemTable.deleteSnapshot(conn);
|
||||
// We still have record with unfinished delete operation
|
||||
LOG.error("Delete operation failed, please run backup repair utility to restore "
|
||||
+ "backup system integrity", e);
|
||||
+ "backup system integrity", e);
|
||||
throw e;
|
||||
} else {
|
||||
LOG.warn("Delete operation succeeded, there were some errors: ", e);
|
||||
|
@ -177,15 +176,15 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
/**
|
||||
* Updates incremental backup set for every backupRoot
|
||||
* @param tablesMap map [backupRoot: {@code Set<TableName>}]
|
||||
* @param table backup system table
|
||||
* @param table backup system table
|
||||
* @throws IOException if a table operation fails
|
||||
*/
|
||||
private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
for (String backupRoot : tablesMap.keySet()) {
|
||||
Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
|
||||
Map<TableName, ArrayList<BackupInfo>> tableMap =
|
||||
table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
|
||||
table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
|
||||
for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
|
||||
if (entry.getValue() == null) {
|
||||
// No more backups for a table
|
||||
|
@ -283,10 +282,10 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
|
||||
private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
List<TableName> tables = info.getTableNames();
|
||||
LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables="
|
||||
+ info.getTableListAsString());
|
||||
LOG.debug(
|
||||
"Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString());
|
||||
if (tables.contains(tn)) {
|
||||
tables.remove(tn);
|
||||
|
||||
|
@ -306,7 +305,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
|
||||
private List<BackupInfo> getAffectedBackupSessions(BackupInfo backupInfo, TableName tn,
|
||||
BackupSystemTable table) throws IOException {
|
||||
BackupSystemTable table) throws IOException {
|
||||
LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
|
||||
long ts = backupInfo.getStartTs();
|
||||
List<BackupInfo> list = new ArrayList<>();
|
||||
|
@ -325,7 +324,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
list.clear();
|
||||
} else {
|
||||
LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
|
||||
+ " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
|
||||
+ " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
|
||||
list.add(info);
|
||||
}
|
||||
}
|
||||
|
@ -338,7 +337,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
* @throws IOException if cleaning up the backup directory fails
|
||||
*/
|
||||
private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
try {
|
||||
// clean up the data at target directory
|
||||
String targetDir = backupInfo.getBackupRootDir();
|
||||
|
@ -349,9 +348,8 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
|
||||
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
|
||||
|
||||
Path targetDirPath =
|
||||
new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
|
||||
backupInfo.getBackupId(), table));
|
||||
Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
|
||||
backupInfo.getBackupId(), table));
|
||||
if (outputFs.delete(targetDirPath, true)) {
|
||||
LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
|
||||
} else {
|
||||
|
@ -359,13 +357,13 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
} catch (IOException e1) {
|
||||
LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
|
||||
+ "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
|
||||
+ "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
|
||||
throw e1;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
List<BackupInfo> history = table.getBackupHistory();
|
||||
for (BackupInfo info : history) {
|
||||
List<TableName> tables = info.getTableNames();
|
||||
|
@ -466,7 +464,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
public void addToBackupSet(String name, TableName[] tables) throws IOException {
|
||||
String[] tableNames = new String[tables.length];
|
||||
try (final BackupSystemTable table = new BackupSystemTable(conn);
|
||||
final Admin admin = conn.getAdmin()) {
|
||||
final Admin admin = conn.getAdmin()) {
|
||||
for (int i = 0; i < tables.length; i++) {
|
||||
tableNames[i] = tables[i].getNameAsString();
|
||||
if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
|
||||
|
@ -474,8 +472,8 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
}
|
||||
table.addToBackupSet(name, tableNames);
|
||||
LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
|
||||
+ "' backup set");
|
||||
LOG.info(
|
||||
"Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -484,8 +482,8 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
|
||||
try (final BackupSystemTable table = new BackupSystemTable(conn)) {
|
||||
table.removeFromBackupSet(name, toStringArray(tables));
|
||||
LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name
|
||||
+ "' completed.");
|
||||
LOG.info(
|
||||
"Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -534,9 +532,9 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
|
||||
if (incrTableSet.isEmpty()) {
|
||||
String msg = "Incremental backup table set contains no tables. "
|
||||
+ "You need to run full backup first "
|
||||
+ (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
|
||||
String msg =
|
||||
"Incremental backup table set contains no tables. " + "You need to run full backup first "
|
||||
+ (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
|
||||
|
||||
throw new IOException(msg);
|
||||
}
|
||||
|
@ -545,7 +543,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
if (!tableList.isEmpty()) {
|
||||
String extraTables = StringUtils.join(tableList, ",");
|
||||
String msg = "Some tables (" + extraTables + ") haven't gone through full backup. "
|
||||
+ "Perform full backup on " + extraTables + " first, " + "then retry the command";
|
||||
+ "Perform full backup on " + extraTables + " first, " + "then retry the command";
|
||||
throw new IOException(msg);
|
||||
}
|
||||
}
|
||||
|
@ -554,13 +552,13 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
if (tableList != null && !tableList.isEmpty()) {
|
||||
for (TableName table : tableList) {
|
||||
String targetTableBackupDir =
|
||||
HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
|
||||
HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
|
||||
Path targetTableBackupDirPath = new Path(targetTableBackupDir);
|
||||
FileSystem outputFs =
|
||||
FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
|
||||
FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
|
||||
if (outputFs.exists(targetTableBackupDirPath)) {
|
||||
throw new IOException("Target backup directory " + targetTableBackupDir
|
||||
+ " exists already.");
|
||||
throw new IOException(
|
||||
"Target backup directory " + targetTableBackupDir + " exists already.");
|
||||
}
|
||||
outputFs.mkdirs(targetTableBackupDirPath);
|
||||
}
|
||||
|
@ -581,8 +579,8 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
tableList = excludeNonExistingTables(tableList, nonExistingTableList);
|
||||
} else {
|
||||
// Throw exception only in full mode - we try to backup non-existing table
|
||||
throw new IOException("Non-existing tables found in the table list: "
|
||||
+ nonExistingTableList);
|
||||
throw new IOException(
|
||||
"Non-existing tables found in the table list: " + nonExistingTableList);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -590,9 +588,9 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
// update table list
|
||||
BackupRequest.Builder builder = new BackupRequest.Builder();
|
||||
request = builder.withBackupType(request.getBackupType()).withTableList(tableList)
|
||||
.withTargetRootDir(request.getTargetRootDir())
|
||||
.withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks())
|
||||
.withBandwidthPerTasks((int) request.getBandwidth()).build();
|
||||
.withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName())
|
||||
.withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth())
|
||||
.build();
|
||||
|
||||
TableBackupClient client;
|
||||
try {
|
||||
|
@ -608,7 +606,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
|
||||
private List<TableName> excludeNonExistingTables(List<TableName> tableList,
|
||||
List<TableName> nonExistingTableList) {
|
||||
List<TableName> nonExistingTableList) {
|
||||
for (TableName table : nonExistingTableList) {
|
||||
tableList.remove(table);
|
||||
}
|
||||
|
@ -619,7 +617,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
public void mergeBackups(String[] backupIds) throws IOException {
|
||||
try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
|
||||
checkIfValidForMerge(backupIds, sysTable);
|
||||
//TODO run job on remote cluster
|
||||
// TODO run job on remote cluster
|
||||
BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
|
||||
job.run(backupIds);
|
||||
}
|
||||
|
@ -627,7 +625,6 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
|
||||
/**
|
||||
* Verifies that backup images are valid for merge.
|
||||
*
|
||||
* <ul>
|
||||
* <li>All backups MUST be in the same destination
|
||||
* <li>No FULL backups are allowed - only INCREMENTAL
|
||||
|
@ -636,11 +633,11 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
* </ul>
|
||||
* <p>
|
||||
* @param backupIds list of backup ids
|
||||
* @param table backup system table
|
||||
* @param table backup system table
|
||||
* @throws IOException if the backup image is not valid for merge
|
||||
*/
|
||||
private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
String backupRoot = null;
|
||||
|
||||
final Set<TableName> allTables = new HashSet<>();
|
||||
|
@ -656,7 +653,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
backupRoot = bInfo.getBackupRootDir();
|
||||
} else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
|
||||
throw new IOException("Found different backup destinations in a list of a backup sessions "
|
||||
+ "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
|
||||
+ "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
|
||||
}
|
||||
if (bInfo.getType() == BackupType.FULL) {
|
||||
throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
|
||||
|
@ -664,7 +661,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
|
||||
if (bInfo.getState() != BackupState.COMPLETE) {
|
||||
throw new IOException("Backup image " + backupId
|
||||
+ " can not be merged becuase of its state: " + bInfo.getState());
|
||||
+ " can not be merged becuase of its state: " + bInfo.getState());
|
||||
}
|
||||
allBackups.add(backupId);
|
||||
allTables.addAll(bInfo.getTableNames());
|
||||
|
@ -677,7 +674,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
}
|
||||
}
|
||||
|
||||
final long startRangeTime = minTime;
|
||||
final long startRangeTime = minTime;
|
||||
final long endRangeTime = maxTime;
|
||||
final String backupDest = backupRoot;
|
||||
// Check we have no 'holes' in backup id list
|
||||
|
@ -688,7 +685,7 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
|
||||
BackupInfo.Filter timeRangeFilter = info -> {
|
||||
long time = info.getStartTs();
|
||||
return time >= startRangeTime && time <= endRangeTime ;
|
||||
return time >= startRangeTime && time <= endRangeTime;
|
||||
};
|
||||
|
||||
BackupInfo.Filter tableFilter = info -> {
|
||||
|
@ -699,20 +696,20 @@ public class BackupAdminImpl implements BackupAdmin {
|
|||
BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
|
||||
BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
|
||||
|
||||
List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter,
|
||||
timeRangeFilter, tableFilter, typeFilter, stateFilter);
|
||||
List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter,
|
||||
tableFilter, typeFilter, stateFilter);
|
||||
if (allInfos.size() != allBackups.size()) {
|
||||
// Yes we have at least one hole in backup image sequence
|
||||
// Yes we have at least one hole in backup image sequence
|
||||
List<String> missingIds = new ArrayList<>();
|
||||
for(BackupInfo info: allInfos) {
|
||||
if(allBackups.contains(info.getBackupId())) {
|
||||
for (BackupInfo info : allInfos) {
|
||||
if (allBackups.contains(info.getBackupId())) {
|
||||
continue;
|
||||
}
|
||||
missingIds.add(info.getBackupId());
|
||||
}
|
||||
String errMsg =
|
||||
"Sequence of backup ids has 'holes'. The following backup images must be added:" +
|
||||
org.apache.hadoop.util.StringUtils.join(",", missingIds);
|
||||
"Sequence of backup ids has 'holes'. The following backup images must be added:"
|
||||
+ org.apache.hadoop.util.StringUtils.join(",", missingIds);
|
||||
throw new IOException(errMsg);
|
||||
}
|
||||
}
|
||||
|
|
|
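BackupAdminImpl above is the programmatic counterpart of the command-line tools: delete and merge both go through the backup system table shown in this hunk. A hedged sketch of the two public entry points used in this file (conn is an existing Connection; the ids are placeholders):

// Sketch only: mirrors the deleteBackups/mergeBackups calls that appear above.
try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
  int deleted = admin.deleteBackups(new String[] { "backup_1396650096738" });
  admin.mergeBackups(new String[] { "backup_1396650196738", "backup_1396650296738" });
}
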
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.impl;
|
||||
|
||||
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
|
||||
|
@ -44,7 +43,6 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_
|
|||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
|
@ -80,33 +78,32 @@ public final class BackupCommands {
|
|||
public final static String INCORRECT_USAGE = "Incorrect usage";
|
||||
|
||||
public final static String TOP_LEVEL_NOT_ALLOWED =
|
||||
"Top level (root) folder is not allowed to be a backup destination";
|
||||
"Top level (root) folder is not allowed to be a backup destination";
|
||||
|
||||
public static final String USAGE = "Usage: hbase backup COMMAND [command-specific arguments]\n"
|
||||
+ "where COMMAND is one of:\n" + " create create a new backup image\n"
|
||||
+ " delete delete an existing backup image\n"
|
||||
+ " describe show the detailed information of a backup image\n"
|
||||
+ " history show history of all successful backups\n"
|
||||
+ " progress show the progress of the latest backup request\n"
|
||||
+ " set backup set management\n"
|
||||
+ " repair repair backup system table\n"
|
||||
+ " merge merge backup images\n"
|
||||
+ "Run \'hbase backup COMMAND -h\' to see help message for each command\n";
|
||||
+ "where COMMAND is one of:\n" + " create create a new backup image\n"
|
||||
+ " delete delete an existing backup image\n"
|
||||
+ " describe show the detailed information of a backup image\n"
|
||||
+ " history show history of all successful backups\n"
|
||||
+ " progress show the progress of the latest backup request\n"
|
||||
+ " set backup set management\n" + " repair repair backup system table\n"
|
||||
+ " merge merge backup images\n"
|
||||
+ "Run \'hbase backup COMMAND -h\' to see help message for each command\n";
|
||||
|
||||
public static final String CREATE_CMD_USAGE =
|
||||
"Usage: hbase backup create <type> <backup_path> [options]\n"
|
||||
+ " type \"full\" to create a full backup image\n"
|
||||
+ " \"incremental\" to create an incremental backup image\n"
|
||||
+ " backup_path Full path to store the backup image\n";
|
||||
"Usage: hbase backup create <type> <backup_path> [options]\n"
|
||||
+ " type \"full\" to create a full backup image\n"
|
||||
+ " \"incremental\" to create an incremental backup image\n"
|
||||
+ " backup_path Full path to store the backup image\n";
|
||||
|
||||
public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress <backup_id>\n"
|
||||
+ " backup_id Backup image id (optional). If no id specified, the command will show\n"
|
||||
+ " progress for currently running backup session.";
|
||||
+ " backup_id Backup image id (optional). If no id specified, the command will show\n"
|
||||
+ " progress for currently running backup session.";
|
||||
public static final String NO_INFO_FOUND = "No info was found for backup id: ";
|
||||
public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found.";
|
||||
|
||||
public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe <backup_id>\n"
|
||||
+ " backup_id Backup image id\n";
|
||||
public static final String DESCRIBE_CMD_USAGE =
|
||||
"Usage: hbase backup describe <backup_id>\n" + " backup_id Backup image id\n";
|
||||
|
||||
public static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [options]";
|
||||
|
||||
|
@ -115,14 +112,13 @@ public final class BackupCommands {
|
|||
public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n";
|
||||
|
||||
public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
|
||||
+ " name Backup set name\n"
|
||||
+ " tables Comma separated list of tables.\n" + "COMMAND is one of:\n"
|
||||
+ " add add tables to a set, create a set if needed\n"
|
||||
+ " remove remove tables from a set\n"
|
||||
+ " list list all backup sets in the system\n"
|
||||
+ " describe describe set\n" + " delete delete backup set\n";
|
||||
+ " name Backup set name\n" + " tables Comma separated list of tables.\n"
|
||||
+ "COMMAND is one of:\n" + " add add tables to a set, create a set if needed\n"
|
||||
+ " remove remove tables from a set\n"
|
||||
+ " list list all backup sets in the system\n" + " describe describe set\n"
|
||||
+ " delete delete backup set\n";
|
||||
public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n"
|
||||
+ " backup_ids Comma separated list of backup image ids.\n";
|
||||
+ " backup_ids Comma separated list of backup image ids.\n";
|
||||
|
||||
public static final String USAGE_FOOTER = "";
|
||||
|
||||
|
@ -281,8 +277,10 @@ public final class BackupCommands {
|
|||
throw new IOException(INCORRECT_USAGE);
|
||||
}
|
||||
|
||||
if (!BackupType.FULL.toString().equalsIgnoreCase(args[1])
|
||||
&& !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) {
|
||||
if (
|
||||
!BackupType.FULL.toString().equalsIgnoreCase(args[1])
|
||||
&& !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])
|
||||
) {
|
||||
System.out.println("ERROR: invalid backup type: " + args[1]);
|
||||
printUsage();
|
||||
throw new IOException(INCORRECT_USAGE);
|
||||
|
@ -301,8 +299,8 @@ public final class BackupCommands {
|
|||
|
||||
// Check if we have both: backup set and list of tables
|
||||
if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) {
|
||||
System.out.println("ERROR: You can specify either backup set or list"
|
||||
+ " of tables, but not both");
|
||||
System.out
|
||||
.println("ERROR: You can specify either backup set or list" + " of tables, but not both");
|
||||
printUsage();
|
||||
throw new IOException(INCORRECT_USAGE);
|
||||
}
|
||||
|
@ -315,20 +313,20 @@ public final class BackupCommands {
|
|||
tables = getTablesForSet(setName, getConf());
|
||||
|
||||
if (tables == null) {
|
||||
System.out.println("ERROR: Backup set '" + setName
|
||||
+ "' is either empty or does not exist");
|
||||
System.out
|
||||
.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
|
||||
printUsage();
|
||||
throw new IOException(INCORRECT_USAGE);
|
||||
}
|
||||
} else {
|
||||
tables = cmdline.getOptionValue(OPTION_TABLE);
|
||||
}
|
||||
int bandwidth =
|
||||
cmdline.hasOption(OPTION_BANDWIDTH) ? Integer.parseInt(cmdline
|
||||
.getOptionValue(OPTION_BANDWIDTH)) : -1;
|
||||
int workers =
|
||||
cmdline.hasOption(OPTION_WORKERS) ? Integer.parseInt(cmdline
|
||||
.getOptionValue(OPTION_WORKERS)) : -1;
|
||||
int bandwidth = cmdline.hasOption(OPTION_BANDWIDTH)
|
||||
? Integer.parseInt(cmdline.getOptionValue(OPTION_BANDWIDTH))
|
||||
: -1;
|
||||
int workers = cmdline.hasOption(OPTION_WORKERS)
|
||||
? Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS))
|
||||
: -1;
|
||||
|
||||
if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) {
|
||||
String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME);
|
||||
|
@ -338,13 +336,11 @@ public final class BackupCommands {
|
|||
|
||||
try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
|
||||
BackupRequest.Builder builder = new BackupRequest.Builder();
|
||||
BackupRequest request =
|
||||
builder
|
||||
.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
|
||||
.withTableList(
|
||||
tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
|
||||
.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
|
||||
.withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
|
||||
BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
|
||||
.withTableList(
|
||||
tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
|
||||
.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
|
||||
.withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
|
||||
String backupId = admin.backupTables(request);
|
||||
System.out.println("Backup session " + backupId + " finished. Status: SUCCESS");
|
||||
} catch (IOException e) {
|
||||
|
@ -506,8 +502,8 @@ public final class BackupCommands {
|
|||
public void execute() throws IOException {
|
||||
|
||||
if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) {
|
||||
System.out.println("No backup id was specified, "
|
||||
+ "will retrieve the most recent (ongoing) session");
|
||||
System.out.println(
|
||||
"No backup id was specified, " + "will retrieve the most recent (ongoing) session");
|
||||
}
|
||||
String[] args = cmdline == null ? null : cmdline.getArgs();
|
||||
if (args != null && args.length > 2) {
|
||||
|
@ -601,15 +597,15 @@ public final class BackupCommands {
|
|||
};
|
||||
List<BackupInfo> history = null;
|
||||
try (final BackupSystemTable sysTable = new BackupSystemTable(conn);
|
||||
BackupAdminImpl admin = new BackupAdminImpl(conn)) {
|
||||
BackupAdminImpl admin = new BackupAdminImpl(conn)) {
|
||||
history = sysTable.getBackupHistory(-1, dateFilter);
|
||||
String[] backupIds = convertToBackupIds(history);
|
||||
int deleted = admin.deleteBackups(backupIds);
|
||||
System.out.println("Deleted " + deleted + " backups. Total older than " + days + " days: "
|
||||
+ backupIds.length);
|
||||
+ backupIds.length);
|
||||
} catch (IOException e) {
|
||||
System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
|
||||
+ "system integrity");
|
||||
+ "system integrity");
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
@ -631,7 +627,7 @@ public final class BackupCommands {
|
|||
System.out.println("Deleted " + deleted + " backups. Total requested: " + backupIds.length);
|
||||
} catch (IOException e) {
|
||||
System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
|
||||
+ "system integrity");
|
||||
+ "system integrity");
|
||||
throw e;
|
||||
}
|
||||
|
||||
|
@ -673,14 +669,14 @@ public final class BackupCommands {
|
|||
|
||||
Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
|
||||
try (final Connection conn = ConnectionFactory.createConnection(conf);
|
||||
final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
|
||||
final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
|
||||
// Failed backup
|
||||
BackupInfo backupInfo;
|
||||
List<BackupInfo> list = sysTable.getBackupInfos(BackupState.RUNNING);
|
||||
if (list.size() == 0) {
|
||||
// No failed sessions found
|
||||
System.out.println("REPAIR status: no failed sessions found."
|
||||
+ " Checking failed delete backup operation ...");
|
||||
+ " Checking failed delete backup operation ...");
|
||||
repairFailedBackupDeletionIfAny(conn, sysTable);
|
||||
repairFailedBackupMergeIfAny(conn, sysTable);
|
||||
return;
|
||||
|
@ -694,10 +690,9 @@ public final class BackupCommands {
|
|||
// set overall backup status: failed
|
||||
backupInfo.setState(BackupState.FAILED);
|
||||
// compose the backup failed data
|
||||
String backupFailedData =
|
||||
"BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs()
|
||||
+ ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
|
||||
+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
|
||||
String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
|
||||
+ backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
|
||||
+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
|
||||
System.out.println(backupFailedData);
|
||||
TableBackupClient.cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
|
||||
// If backup session is updated to FAILED state - means we
|
||||
|
@ -709,7 +704,7 @@ public final class BackupCommands {
|
|||
}
|
||||
|
||||
private void repairFailedBackupDeletionIfAny(Connection conn, BackupSystemTable sysTable)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
String[] backupIds = sysTable.getListOfBackupIdsFromDeleteOperation();
|
||||
if (backupIds == null || backupIds.length == 0) {
|
||||
System.out.println("No failed backup DELETE operation found");
|
||||
|
@ -730,7 +725,7 @@ public final class BackupCommands {
|
|||
}
|
||||
|
||||
public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTable sysTable)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
|
||||
String[] backupIds = sysTable.getListOfBackupIdsFromMergeOperation();
|
||||
if (backupIds == null || backupIds.length == 0) {
|
||||
|
@ -754,9 +749,11 @@ public final class BackupCommands {
|
|||
}
|
||||
boolean res = fs.rename(tmpPath, destPath);
|
||||
if (!res) {
|
||||
throw new IOException("MERGE repair: failed to rename from "+ tmpPath+" to "+ destPath);
|
||||
throw new IOException(
|
||||
"MERGE repair: failed to rename from " + tmpPath + " to " + destPath);
|
||||
}
|
||||
System.out.println("MERGE repair: renamed from "+ tmpPath+" to "+ destPath+" res="+ res);
|
||||
System.out
|
||||
.println("MERGE repair: renamed from " + tmpPath + " to " + destPath + " res=" + res);
|
||||
} else {
|
||||
checkRemoveBackupImages(fs, backupRoot, backupIds);
|
||||
}
|
||||
|
@ -773,16 +770,16 @@ public final class BackupCommands {
|
|||
private static void checkRemoveBackupImages(FileSystem fs, String backupRoot,
|
||||
String[] backupIds) throws IOException {
|
||||
String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);
|
||||
for (String backupId: backupIds) {
|
||||
for (String backupId : backupIds) {
|
||||
if (backupId.equals(mergedBackupId)) {
|
||||
continue;
|
||||
}
|
||||
Path path = HBackupFileSystem.getBackupPath(backupRoot, backupId);
|
||||
if (fs.exists(path)) {
|
||||
if (!fs.delete(path, true)) {
|
||||
System.out.println("MERGE repair removing: "+ path +" - FAILED");
|
||||
System.out.println("MERGE repair removing: " + path + " - FAILED");
|
||||
} else {
|
||||
System.out.println("MERGE repair removing: "+ path +" - OK");
|
||||
System.out.println("MERGE repair removing: " + path + " - OK");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -816,23 +813,23 @@ public final class BackupCommands {

String[] args = cmdline == null ? null : cmdline.getArgs();
if (args == null || (args.length != 2)) {
System.err.println("ERROR: wrong number of arguments: "
+ (args == null ? null : args.length));
System.err
.println("ERROR: wrong number of arguments: " + (args == null ? null : args.length));
printUsage();
throw new IOException(INCORRECT_USAGE);
}

String[] backupIds = args[1].split(",");
if (backupIds.length < 2) {
String msg = "ERROR: can not merge a single backup image. "+
"Number of images must be greater than 1.";
String msg = "ERROR: can not merge a single backup image. "
+ "Number of images must be greater than 1.";
System.err.println(msg);
throw new IOException(msg);

}
Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
try (final Connection conn = ConnectionFactory.createConnection(conf);
final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
admin.mergeBackups(backupIds);
}
}
@@ -889,7 +886,7 @@ public final class BackupCommands {
} else {
// load from backup FS
history =
BackupUtils.getHistory(getConf(), n, backupRootPath, tableNameFilter, tableSetFilter);
BackupUtils.getHistory(getConf(), n, backupRootPath, tableNameFilter, tableSetFilter);
}
for (BackupInfo info : history) {
System.out.println(info.getShortDescription());
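
Note: the MERGE repair hunks above implement a two-phase recovery: if the temporary merge output still exists it is renamed over the destination backup directory, otherwise the already-merged images are deleted and only the most recent (merged) one is kept. A condensed, illustrative sketch of that flow follows; the method name and the tmpPath/destPath/backupRoot parameters are stand-ins recovered by the caller, not the actual BackupCommands entry point.

// Sketch only: finishing an interrupted backup MERGE (assumed inputs, not the real repair method).
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.util.BackupUtils;

class MergeRepairSketch {
  static void repair(FileSystem fs, Path tmpPath, Path destPath, String backupRoot,
      String[] backupIds) throws IOException {
    if (fs.exists(tmpPath)) {
      // Phase 1 was interrupted: the merged image never made it into place, so finish the rename.
      if (!fs.rename(tmpPath, destPath)) {
        throw new IOException("MERGE repair: failed to rename from " + tmpPath + " to " + destPath);
      }
    } else {
      // Phase 2: the rename already happened, so drop every image that was folded into the merge.
      String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);
      for (String backupId : backupIds) {
        if (!backupId.equals(mergedBackupId)) {
          fs.delete(HBackupFileSystem.getBackupPath(backupRoot, backupId), true);
        }
      }
    }
  }
}
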
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.backup.impl;

import org.apache.hadoop.hbase.HBaseIOException;
@@ -48,7 +47,7 @@ public class BackupException extends HBaseIOException {

/**
* Exception for the given backup that has no previous root cause
* @param msg reason why the backup failed
* @param msg reason why the backup failed
* @param desc description of the backup that is being failed
*/
public BackupException(String msg, BackupInfo desc) {
@@ -58,9 +57,9 @@ public class BackupException extends HBaseIOException {

/**
* Exception for the given backup due to another exception
* @param msg reason why the backup failed
* @param msg reason why the backup failed
* @param cause root cause of the failure
* @param desc description of the backup that is being failed
* @param desc description of the backup that is being failed
*/
public BackupException(String msg, Throwable cause, BackupInfo desc) {
super(msg, cause);
@@ -68,10 +67,9 @@ public class BackupException extends HBaseIOException {
}

/**
* Exception when the description of the backup cannot be determined, due to some other root
* cause
* Exception when the description of the backup cannot be determined, due to some other root cause
* @param message description of what caused the failure
* @param e root cause
* @param e root cause
*/
public BackupException(String message, Exception e) {
super(message, e);
@@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -60,7 +59,7 @@ import org.slf4j.LoggerFactory;
public class BackupManager implements Closeable {
// in seconds
public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY =
"hbase.backup.exclusive.op.timeout.seconds";
"hbase.backup.exclusive.op.timeout.seconds";
// In seconds
private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600;
private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class);
@@ -77,10 +76,12 @@ public class BackupManager implements Closeable {
* @throws IOException exception
*/
public BackupManager(Connection conn, Configuration conf) throws IOException {
if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) {
if (
!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
) {
throw new BackupException("HBase backup is not enabled. Check your "
+ BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting.");
+ BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting.");
}
this.conf = conf;
this.conn = conn;
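
Note: the constructor hunk above is representative of most hunks in this change: behaviour is untouched, and only the wrapping of long boolean conditions moves into a dedicated block between "if (" and ") {". A compilable toy example of that layout, with made-up names:

// Illustrative only: the condition-wrapping style applied throughout this change (names are made up).
public class ConditionWrappingExample {
  static boolean backupEnabled(java.util.Properties props, String key) {
    // Old layout: the continuation of the condition was indented under the "if (!Boolean..." line.
    // New layout, as in the hunks above: the whole condition sits on its own lines.
    if (
      !Boolean.parseBoolean(props.getProperty(key, "false"))
    ) {
      return false;
    }
    return true;
  }
}
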
@ -120,12 +121,13 @@ public class BackupManager implements Closeable {
|
|||
}
|
||||
|
||||
plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
|
||||
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") +
|
||||
BackupHFileCleaner.class.getName());
|
||||
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
|
||||
(plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Added log cleaner: {}. Added master procedure manager: {}."
|
||||
+"Added master procedure manager: {}", cleanerClass, masterProcedureClass,
|
||||
BackupHFileCleaner.class.getName());
|
||||
LOG.debug(
|
||||
"Added log cleaner: {}. Added master procedure manager: {}."
|
||||
+ "Added master procedure manager: {}",
|
||||
cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -163,8 +165,7 @@ public class BackupManager implements Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get configuration
|
||||
* @return configuration
|
||||
* Get configuration n
|
||||
*/
|
||||
Configuration getConf() {
|
||||
return conf;
|
||||
|
@ -186,17 +187,15 @@ public class BackupManager implements Closeable {
|
|||
|
||||
/**
|
||||
* Creates a backup info based on input backup request.
|
||||
* @param backupId backup id
|
||||
* @param type type
|
||||
* @param tableList table list
|
||||
* @param backupId backup id
|
||||
* @param type type
|
||||
* @param tableList table list
|
||||
* @param targetRootDir root dir
|
||||
* @param workers number of parallel workers
|
||||
* @param bandwidth bandwidth per worker in MB per sec
|
||||
* @return BackupInfo
|
||||
* @throws BackupException exception
|
||||
* @param workers number of parallel workers
|
||||
* @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception
|
||||
*/
|
||||
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
|
||||
String targetRootDir, int workers, long bandwidth) throws BackupException {
|
||||
String targetRootDir, int workers, long bandwidth) throws BackupException {
|
||||
if (targetRootDir == null) {
|
||||
throw new BackupException("Wrong backup request parameter: target backup root directory");
|
||||
}
|
||||
|
@ -292,8 +291,8 @@ public class BackupManager implements Closeable {
|
|||
BackupImage.Builder builder = BackupImage.newBuilder();
|
||||
|
||||
BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
|
||||
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
|
||||
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
|
||||
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
|
||||
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
|
||||
|
||||
// Only direct ancestors for a backup are required and not entire history of backup for this
|
||||
// table resulting in verifying all of the previous backups which is unnecessary and backup
|
||||
|
@ -320,21 +319,21 @@ public class BackupManager implements Closeable {
|
|||
if (BackupManifest.canCoverImage(ancestors, image)) {
|
||||
LOG.debug("Met the backup boundary of the current table set:");
|
||||
for (BackupImage image1 : ancestors) {
|
||||
LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir());
|
||||
LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir());
|
||||
}
|
||||
} else {
|
||||
Path logBackupPath =
|
||||
HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId());
|
||||
LOG.debug("Current backup has an incremental backup ancestor, "
|
||||
+ "touching its image manifest in {}"
|
||||
+ " to construct the dependency.", logBackupPath.toString());
|
||||
HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId());
|
||||
LOG.debug(
|
||||
"Current backup has an incremental backup ancestor, "
|
||||
+ "touching its image manifest in {}" + " to construct the dependency.",
|
||||
logBackupPath.toString());
|
||||
BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
|
||||
BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
|
||||
ancestors.add(lastIncrImage);
|
||||
|
||||
LOG.debug(
|
||||
"Last dependent incremental backup image: {BackupID={}" +
|
||||
"BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
|
||||
LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}",
|
||||
lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -345,12 +344,12 @@ public class BackupManager implements Closeable {
|
|||
/**
|
||||
* Get the direct ancestors of this backup for one table involved.
|
||||
* @param backupInfo backup info
|
||||
* @param table table
|
||||
* @param table table
|
||||
* @return backupImages on the dependency list
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
|
||||
ArrayList<BackupImage> tableAncestors = new ArrayList<>();
|
||||
for (BackupImage image : ancestors) {
|
||||
|
@ -399,11 +398,13 @@ public class BackupManager implements Closeable {
|
|||
// Restore the interrupted status
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
if (lastWarningOutputTime == 0
|
||||
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) {
|
||||
if (
|
||||
lastWarningOutputTime == 0
|
||||
|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000
|
||||
) {
|
||||
lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
|
||||
LOG.warn("Waiting to acquire backup exclusive lock for {}s",
|
||||
+(lastWarningOutputTime - startTime) / 1000);
|
||||
+(lastWarningOutputTime - startTime) / 1000);
|
||||
}
|
||||
} else {
|
||||
throw e;
|
||||
|
@ -480,8 +481,8 @@ public class BackupManager implements Closeable {
|
|||
* @param tables tables
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public void writeRegionServerLogTimestamp(Set<TableName> tables,
|
||||
Map<String, Long> newTimestamps) throws IOException {
|
||||
public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps)
|
||||
throws IOException {
|
||||
systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir());
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.impl;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -26,7 +25,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
|
@ -50,9 +48,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
|
|||
/**
|
||||
* Backup manifest contains all the meta data of a backup image. The manifest info will be bundled
|
||||
* as manifest file together with data. So that each backup image will contain all the info needed
|
||||
* for restore. BackupManifest is a storage container for BackupImage.
|
||||
* It is responsible for storing/reading backup image data and has some additional utility methods.
|
||||
*
|
||||
* for restore. BackupManifest is a storage container for BackupImage. It is responsible for
|
||||
* storing/reading backup image data and has some additional utility methods.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class BackupManifest {
|
||||
|
@ -126,8 +123,8 @@ public class BackupManifest {
|
|||
super();
|
||||
}
|
||||
|
||||
private BackupImage(String backupId, BackupType type, String rootDir,
|
||||
List<TableName> tableList, long startTs, long completeTs) {
|
||||
private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList,
|
||||
long startTs, long completeTs) {
|
||||
this.backupId = backupId;
|
||||
this.type = type;
|
||||
this.rootDir = rootDir;
|
||||
|
@ -149,9 +146,9 @@ public class BackupManifest {
|
|||
|
||||
List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();
|
||||
|
||||
BackupType type =
|
||||
im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL
|
||||
: BackupType.INCREMENTAL;
|
||||
BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL
|
||||
? BackupType.FULL
|
||||
: BackupType.INCREMENTAL;
|
||||
|
||||
BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
|
||||
for (BackupProtos.BackupImage img : ancestorList) {
|
||||
|
@ -187,8 +184,8 @@ public class BackupManifest {
|
|||
return builder.build();
|
||||
}
|
||||
|
||||
private static Map<TableName, Map<String, Long>> loadIncrementalTimestampMap(
|
||||
BackupProtos.BackupImage proto) {
|
||||
private static Map<TableName, Map<String, Long>>
|
||||
loadIncrementalTimestampMap(BackupProtos.BackupImage proto) {
|
||||
List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList();
|
||||
|
||||
Map<TableName, Map<String, Long>> incrTimeRanges = new HashMap<>();
|
||||
|
@ -221,13 +218,13 @@ public class BackupManifest {
|
|||
TableName key = entry.getKey();
|
||||
Map<String, Long> value = entry.getValue();
|
||||
BackupProtos.TableServerTimestamp.Builder tstBuilder =
|
||||
BackupProtos.TableServerTimestamp.newBuilder();
|
||||
BackupProtos.TableServerTimestamp.newBuilder();
|
||||
tstBuilder.setTableName(ProtobufUtil.toProtoTableName(key));
|
||||
|
||||
for (Map.Entry<String, Long> entry2 : value.entrySet()) {
|
||||
String s = entry2.getKey();
|
||||
BackupProtos.ServerTimestamp.Builder stBuilder =
|
||||
BackupProtos.ServerTimestamp.newBuilder();
|
||||
BackupProtos.ServerTimestamp.newBuilder();
|
||||
HBaseProtos.ServerName.Builder snBuilder = HBaseProtos.ServerName.newBuilder();
|
||||
ServerName sn = ServerName.parseServerName(s);
|
||||
snBuilder.setHostName(sn.getHostname());
|
||||
|
@ -378,10 +375,9 @@ public class BackupManifest {
|
|||
*/
|
||||
public BackupManifest(BackupInfo backup) {
|
||||
BackupImage.Builder builder = BackupImage.newBuilder();
|
||||
this.backupImage =
|
||||
builder.withBackupId(backup.getBackupId()).withType(backup.getType())
|
||||
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
|
||||
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
|
||||
this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
|
||||
.withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
|
||||
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -393,16 +389,14 @@ public class BackupManifest {
|
|||
List<TableName> tables = new ArrayList<TableName>();
|
||||
tables.add(table);
|
||||
BackupImage.Builder builder = BackupImage.newBuilder();
|
||||
this.backupImage =
|
||||
builder.withBackupId(backup.getBackupId()).withType(backup.getType())
|
||||
.withRootDir(backup.getBackupRootDir()).withTableList(tables)
|
||||
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
|
||||
this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
|
||||
.withRootDir(backup.getBackupRootDir()).withTableList(tables)
|
||||
.withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct manifest from a backup directory.
|
||||
*
|
||||
* @param conf configuration
|
||||
* @param conf configuration
|
||||
* @param backupPath backup path
|
||||
* @throws IOException if constructing the manifest from the backup directory fails
|
||||
*/
|
||||
|
@ -412,7 +406,7 @@ public class BackupManifest {
|
|||
|
||||
/**
|
||||
* Construct manifest from a backup directory.
|
||||
* @param fs the FileSystem
|
||||
* @param fs the FileSystem
|
||||
* @param backupPath backup path
|
||||
* @throws BackupException exception
|
||||
*/
|
||||
|
@ -449,7 +443,7 @@ public class BackupManifest {
|
|||
}
|
||||
this.backupImage = BackupImage.fromProto(proto);
|
||||
LOG.debug("Loaded manifest instance from manifest file: "
|
||||
+ BackupUtils.getPath(subFile.getPath()));
|
||||
+ BackupUtils.getPath(subFile.getPath()));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -480,10 +474,10 @@ public class BackupManifest {
|
|||
byte[] data = backupImage.toProto().toByteArray();
|
||||
// write the file, overwrite if already exist
|
||||
Path manifestFilePath =
|
||||
new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(),
|
||||
backupImage.getBackupId()), MANIFEST_FILE_NAME);
|
||||
new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()),
|
||||
MANIFEST_FILE_NAME);
|
||||
try (FSDataOutputStream out =
|
||||
manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
|
||||
manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
|
||||
out.write(data);
|
||||
} catch (IOException e) {
|
||||
throw new BackupException(e.getMessage());
|
||||
|
@ -531,8 +525,8 @@ public class BackupManifest {
|
|||
for (BackupImage image : backupImage.getAncestors()) {
|
||||
restoreImages.put(Long.valueOf(image.startTs), image);
|
||||
}
|
||||
return new ArrayList<>(reverse ? (restoreImages.descendingMap().values())
|
||||
: (restoreImages.values()));
|
||||
return new ArrayList<>(
|
||||
reverse ? (restoreImages.descendingMap().values()) : (restoreImages.values()));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -614,7 +608,7 @@ public class BackupManifest {
|
|||
/**
|
||||
* Check whether backup image set could cover a backup image or not.
|
||||
* @param fullImages The backup image set
|
||||
* @param image The target backup image
|
||||
* @param image The target backup image
|
||||
* @return true if fullImages can cover image, otherwise false
|
||||
*/
|
||||
public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
|
||||
|
@ -664,8 +658,8 @@ public class BackupManifest {
|
|||
info.setStartTs(backupImage.getStartTs());
|
||||
info.setBackupRootDir(backupImage.getRootDir());
|
||||
if (backupImage.getType() == BackupType.INCREMENTAL) {
|
||||
info.setHLogTargetDir(BackupUtils.getLogBackupDir(backupImage.getRootDir(),
|
||||
backupImage.getBackupId()));
|
||||
info.setHLogTargetDir(
|
||||
BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId()));
|
||||
}
|
||||
return info;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/**
|
||||
*
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.util.Pair;
|
|||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
|
||||
|
||||
|
@ -232,7 +232,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
long TIMEOUT = 60000;
|
||||
long startTime = EnvironmentEdgeManager.currentTime();
|
||||
LOG.debug("Backup table {} is not present and available, waiting for it to become so",
|
||||
tableName);
|
||||
tableName);
|
||||
while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
|
||||
try {
|
||||
Thread.sleep(100);
|
||||
|
@ -274,15 +274,17 @@ public final class BackupSystemTable implements Closeable {
|
|||
Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
|
||||
Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
|
||||
try (Table table = connection.getTable(bulkLoadTableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
Result res = null;
|
||||
Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
|
||||
while ((res = scanner.next()) != null) {
|
||||
res.advance();
|
||||
byte[] row = CellUtil.cloneRow(res.listCells().get(0));
|
||||
for (Cell cell : res.listCells()) {
|
||||
if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
|
||||
BackupSystemTable.PATH_COL.length) == 0) {
|
||||
if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
|
||||
BackupSystemTable.PATH_COL.length) == 0
|
||||
) {
|
||||
map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
|
||||
}
|
||||
}
|
||||
|
@ -298,11 +300,11 @@ public final class BackupSystemTable implements Closeable {
|
|||
* @return array of Map of family to List of Paths
|
||||
*/
|
||||
public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
|
||||
Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
|
||||
try (Table table = connection.getTable(bulkLoadTableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
Result res = null;
|
||||
while ((res = scanner.next()) != null) {
|
||||
res.advance();
|
||||
|
@ -310,14 +312,20 @@ public final class BackupSystemTable implements Closeable {
|
|||
byte[] fam = null;
|
||||
String path = null;
|
||||
for (Cell cell : res.listCells()) {
|
||||
if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
|
||||
BackupSystemTable.TBL_COL.length) == 0) {
|
||||
if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
|
||||
BackupSystemTable.TBL_COL.length) == 0
|
||||
) {
|
||||
tbl = TableName.valueOf(CellUtil.cloneValue(cell));
|
||||
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
|
||||
BackupSystemTable.FAM_COL.length) == 0) {
|
||||
} else if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
|
||||
BackupSystemTable.FAM_COL.length) == 0
|
||||
) {
|
||||
fam = CellUtil.cloneValue(cell);
|
||||
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
|
||||
BackupSystemTable.PATH_COL.length) == 0) {
|
||||
} else if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
|
||||
BackupSystemTable.PATH_COL.length) == 0
|
||||
) {
|
||||
path = Bytes.toString(CellUtil.cloneValue(cell));
|
||||
}
|
||||
}
|
||||
|
@ -368,7 +376,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
* @param finalPaths family and associated hfiles
|
||||
*/
|
||||
public void writePathsPostBulkLoad(TableName tabName, byte[] region,
|
||||
Map<byte[], List<Path>> finalPaths) throws IOException {
|
||||
Map<byte[], List<Path>> finalPaths) throws IOException {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size()
|
||||
+ " entries");
|
||||
|
@ -388,14 +396,14 @@ public final class BackupSystemTable implements Closeable {
|
|||
* @param pairs list of paths for hfiles
|
||||
*/
|
||||
public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family,
|
||||
final List<Pair<Path, Path>> pairs) throws IOException {
|
||||
final List<Pair<Path, Path>> pairs) throws IOException {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(
|
||||
"write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries");
|
||||
}
|
||||
try (Table table = connection.getTable(bulkLoadTableName)) {
|
||||
List<Put> puts =
|
||||
BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs);
|
||||
BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs);
|
||||
table.put(puts);
|
||||
LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
|
||||
}
|
||||
|
@ -434,7 +442,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable);
|
||||
Map<String, Map<String, List<Pair<String, Boolean>>>> tblMap = map.get(tTable);
|
||||
try (Table table = connection.getTable(bulkLoadTableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
Result res = null;
|
||||
while ((res = scanner.next()) != null) {
|
||||
res.advance();
|
||||
|
@ -448,14 +456,20 @@ public final class BackupSystemTable implements Closeable {
|
|||
rows.add(row);
|
||||
String rowStr = Bytes.toString(row);
|
||||
region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
|
||||
if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
|
||||
BackupSystemTable.FAM_COL.length) == 0) {
|
||||
if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
|
||||
BackupSystemTable.FAM_COL.length) == 0
|
||||
) {
|
||||
fam = Bytes.toString(CellUtil.cloneValue(cell));
|
||||
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
|
||||
BackupSystemTable.PATH_COL.length) == 0) {
|
||||
} else if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
|
||||
BackupSystemTable.PATH_COL.length) == 0
|
||||
) {
|
||||
path = Bytes.toString(CellUtil.cloneValue(cell));
|
||||
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
|
||||
BackupSystemTable.STATE_COL.length) == 0) {
|
||||
} else if (
|
||||
CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
|
||||
BackupSystemTable.STATE_COL.length) == 0
|
||||
) {
|
||||
byte[] state = CellUtil.cloneValue(cell);
|
||||
if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
|
||||
raw = true;
|
||||
|
@ -489,7 +503,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
* @param backupId the backup Id
|
||||
*/
|
||||
public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps,
|
||||
String backupId) throws IOException {
|
||||
String backupId) throws IOException {
|
||||
try (Table table = connection.getTable(bulkLoadTableName)) {
|
||||
long ts = EnvironmentEdgeManager.currentTime();
|
||||
int cnt = 0;
|
||||
|
@ -566,7 +580,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte.
|
||||
* @param startCode start code
|
||||
* @param startCode start code
|
||||
* @param backupRoot root directory path to backup
|
||||
* @throws IOException exception
|
||||
*/
|
||||
|
@ -583,7 +597,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
/**
|
||||
* Exclusive operations are: create, delete, merge
|
||||
* @throws IOException if a table operation fails or an active backup exclusive operation is
|
||||
* already underway
|
||||
* already underway
|
||||
*/
|
||||
public void startBackupExclusiveOperation() throws IOException {
|
||||
LOG.debug("Start new backup exclusive operation");
|
||||
|
@@ -591,11 +605,15 @@ public final class BackupSystemTable implements Closeable {
try (Table table = connection.getTable(tableName)) {
Put put = createPutForStartBackupSession();
// First try to put if row does not exist
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifNotExists().thenPut(put)) {
if (
!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifNotExists().thenPut(put)
) {
// Row exists, try to put if value == ACTIVE_SESSION_NO
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
if (
!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_NO).thenPut(put)
) {
throw new ExclusiveOperationException();
}
}
@@ -613,8 +631,10 @@ public final class BackupSystemTable implements Closeable {

try (Table table = connection.getTable(tableName)) {
Put put = createPutForStopBackupSession();
if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_YES).thenPut(put)) {
if (
!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
.ifEquals(ACTIVE_SESSION_YES).thenPut(put)
) {
throw new IOException("There is no active backup exclusive operation");
}
}
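
Note: the two hunks above only re-wrap the conditions around the backup exclusive lock, which is a plain row-level check-and-mutate on the backup system table. A minimal sketch of that idiom follows; the row/family/qualifier constants and their values are illustrative stand-ins for the ACTIVE_SESSION_* fields of BackupSystemTable.

// Sketch only: acquire/release an exclusive flag with HBase checkAndMutate (illustrative constants).
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class ExclusiveFlagSketch {
  static final byte[] ROW = Bytes.toBytes("activesession:");
  static final byte[] FAMILY = Bytes.toBytes("session");
  static final byte[] QUALIFIER = Bytes.toBytes("c");
  static final byte[] YES = Bytes.toBytes("yes");
  static final byte[] NO = Bytes.toBytes("no");

  static void acquire(Table table) throws IOException {
    Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, YES);
    // First writer wins if the row does not exist yet ...
    if (!table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put)) {
      // ... otherwise only succeed if the previous holder released the flag.
      if (!table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(NO).thenPut(put)) {
        throw new IOException("backup exclusive operation already in progress");
      }
    }
  }

  static void release(Table table) throws IOException {
    Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, NO);
    if (!table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(YES).thenPut(put)) {
      throw new IOException("there is no active backup exclusive operation");
    }
  }
}
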
@ -633,13 +653,13 @@ public final class BackupSystemTable implements Closeable {
|
|||
* @throws IOException exception
|
||||
*/
|
||||
public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
LOG.trace("read region server last roll log result to backup system table");
|
||||
|
||||
Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot);
|
||||
|
||||
try (Table table = connection.getTable(tableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
Result res;
|
||||
HashMap<String, Long> rsTimestampMap = new HashMap<>();
|
||||
while ((res = scanner.next()) != null) {
|
||||
|
@ -656,13 +676,13 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Writes Region Server last roll log result (timestamp) to backup system table table
|
||||
* @param server Region Server name
|
||||
* @param ts last log timestamp
|
||||
* @param server Region Server name
|
||||
* @param ts last log timestamp
|
||||
* @param backupRoot root directory path to backup
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
LOG.trace("write region server last roll log result to backup system table");
|
||||
|
||||
try (Table table = connection.getTable(tableName)) {
|
||||
|
@ -710,7 +730,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Get backup history records filtered by list of filters.
|
||||
* @param n max number of records, if n == -1 , then max number is ignored
|
||||
* @param n max number of records, if n == -1 , then max number is ignored
|
||||
* @param filters list of filters
|
||||
* @return backup records
|
||||
* @throws IOException if getting the backup history fails
|
||||
|
@ -793,7 +813,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
}
|
||||
|
||||
public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
|
||||
String backupRoot) throws IOException {
|
||||
String backupRoot) throws IOException {
|
||||
List<BackupInfo> history = getBackupHistory(backupRoot);
|
||||
Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
|
||||
for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
|
||||
|
@ -829,7 +849,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
ArrayList<BackupInfo> list = new ArrayList<>();
|
||||
|
||||
try (Table table = connection.getTable(tableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
Result res;
|
||||
while ((res = scanner.next()) != null) {
|
||||
res.advance();
|
||||
|
@ -847,16 +867,16 @@ public final class BackupSystemTable implements Closeable {
|
|||
* Write the current timestamps for each regionserver to backup system table after a successful
|
||||
* full or incremental backup. The saved timestamp is of the last log file that was backed up
|
||||
* already.
|
||||
* @param tables tables
|
||||
* @param tables tables
|
||||
* @param newTimestamps timestamps
|
||||
* @param backupRoot root directory path to backup
|
||||
* @param backupRoot root directory path to backup
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public void writeRegionServerLogTimestamp(Set<TableName> tables,
|
||||
Map<String, Long> newTimestamps, String backupRoot) throws IOException {
|
||||
public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps,
|
||||
String backupRoot) throws IOException {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("write RS log time stamps to backup system table for tables ["
|
||||
+ StringUtils.join(tables, ",") + "]");
|
||||
+ StringUtils.join(tables, ",") + "]");
|
||||
}
|
||||
List<Put> puts = new ArrayList<>();
|
||||
for (TableName table : tables) {
|
||||
|
@ -879,7 +899,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
* @throws IOException exception
|
||||
*/
|
||||
public Map<TableName, Map<String, Long>> readLogTimestampMap(String backupRoot)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("read RS log ts from backup system table for root=" + backupRoot);
|
||||
}
|
||||
|
@ -888,7 +908,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
Scan scan = createScanForReadLogTimestampMap(backupRoot);
|
||||
try (Table table = connection.getTable(tableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
Result res;
|
||||
while ((res = scanner.next()) != null) {
|
||||
res.advance();
|
||||
|
@ -899,11 +919,11 @@ public final class BackupSystemTable implements Closeable {
|
|||
byte[] data = CellUtil.cloneValue(cell);
|
||||
if (data == null) {
|
||||
throw new IOException("Data of last backup data from backup system table "
|
||||
+ "is empty. Create a backup first.");
|
||||
+ "is empty. Create a backup first.");
|
||||
}
|
||||
if (data != null && data.length > 0) {
|
||||
HashMap<String, Long> lastBackup =
|
||||
fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
|
||||
fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
|
||||
tableTimestampMap.put(tn, lastBackup);
|
||||
}
|
||||
}
|
||||
|
@ -912,11 +932,11 @@ public final class BackupSystemTable implements Closeable {
|
|||
}
|
||||
|
||||
private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table,
|
||||
Map<String, Long> map) {
|
||||
Map<String, Long> map) {
|
||||
BackupProtos.TableServerTimestamp.Builder tstBuilder =
|
||||
BackupProtos.TableServerTimestamp.newBuilder();
|
||||
BackupProtos.TableServerTimestamp.newBuilder();
|
||||
tstBuilder
|
||||
.setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
|
||||
.setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
|
||||
|
||||
for (Entry<String, Long> entry : map.entrySet()) {
|
||||
BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
|
||||
|
@ -939,7 +959,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
|
||||
for (BackupProtos.ServerTimestamp st : list) {
|
||||
ServerName sn =
|
||||
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName());
|
||||
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName());
|
||||
map.put(sn.getHostname() + ":" + sn.getPort(), st.getTimestamp());
|
||||
}
|
||||
return map;
|
||||
|
@ -973,12 +993,12 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Add tables to global incremental backup set
|
||||
* @param tables set of tables
|
||||
* @param tables set of tables
|
||||
* @param backupRoot root directory path to backup
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot
|
||||
+ " tables [" + StringUtils.join(tables, " ") + "]");
|
||||
|
@ -1019,7 +1039,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
Scan scan = createScanForBackupHistory();
|
||||
scan.setCaching(1);
|
||||
try (Table table = connection.getTable(tableName);
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
ResultScanner scanner = table.getScanner(scan)) {
|
||||
if (scanner.next() != null) {
|
||||
result = true;
|
||||
}
|
||||
|
@ -1073,13 +1093,13 @@ public final class BackupSystemTable implements Closeable {
|
|||
res.advance();
|
||||
String[] tables = cellValueToBackupSet(res.current());
|
||||
return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item))
|
||||
.collect(Collectors.toList());
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add backup set (list of tables)
|
||||
* @param name set name
|
||||
* @param name set name
|
||||
* @param newTables list of tables, comma-separated
|
||||
* @throws IOException if a table operation fails
|
||||
*/
|
||||
|
@ -1105,7 +1125,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Remove tables from backup set (list of tables)
|
||||
* @param name set name
|
||||
* @param name set name
|
||||
* @param toRemove list of tables
|
||||
* @throws IOException if a table operation or deleting the backup set fails
|
||||
*/
|
||||
|
@ -1132,7 +1152,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
table.put(put);
|
||||
} else if (disjoint.length == tables.length) {
|
||||
LOG.warn("Backup set '" + name + "' does not contain tables ["
|
||||
+ StringUtils.join(toRemove, " ") + "]");
|
||||
+ StringUtils.join(toRemove, " ") + "]");
|
||||
} else { // disjoint.length == 0 and tables.length >0
|
||||
// Delete backup set
|
||||
LOG.info("Backup set '" + name + "' is empty. Deleting.");
|
||||
|
@ -1176,7 +1196,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf));
|
||||
|
||||
ColumnFamilyDescriptorBuilder colBuilder =
|
||||
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
|
||||
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
|
||||
|
||||
colBuilder.setMaxVersions(1);
|
||||
Configuration config = HBaseConfiguration.create();
|
||||
|
@ -1213,10 +1233,10 @@ public final class BackupSystemTable implements Closeable {
|
|||
*/
|
||||
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
|
||||
TableDescriptorBuilder builder =
|
||||
TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));
|
||||
TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));
|
||||
|
||||
ColumnFamilyDescriptorBuilder colBuilder =
|
||||
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
|
||||
ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
|
||||
colBuilder.setMaxVersions(1);
|
||||
Configuration config = HBaseConfiguration.create();
|
||||
int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
|
||||
|
@ -1375,11 +1395,11 @@ public final class BackupSystemTable implements Closeable {
|
|||
/**
|
||||
* Creates Put to write RS last roll log timestamp map
|
||||
* @param table table
|
||||
* @param smap map, containing RS:ts
|
||||
* @param smap map, containing RS:ts
|
||||
* @return put operation
|
||||
*/
|
||||
private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap,
|
||||
String backupRoot) {
|
||||
String backupRoot) {
|
||||
Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString()));
|
||||
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap);
|
||||
return put;
|
||||
|
@ -1414,12 +1434,12 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Creates Put to store RS last log result
|
||||
* @param server server name
|
||||
* @param server server name
|
||||
* @param timestamp log roll result (timestamp)
|
||||
* @return put operation
|
||||
*/
|
||||
private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp,
|
||||
String backupRoot) {
|
||||
String backupRoot) {
|
||||
Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server));
|
||||
put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"),
|
||||
Bytes.toBytes(timestamp));
|
||||
|
@ -1458,7 +1478,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
* Creates Put's for bulk load resulting from running LoadIncrementalHFiles
|
||||
*/
|
||||
static List<Put> createPutForCommittedBulkload(TableName table, byte[] region,
|
||||
Map<byte[], List<Path>> finalPaths) {
|
||||
Map<byte[], List<Path>> finalPaths) {
|
||||
List<Put> puts = new ArrayList<>();
|
||||
for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) {
|
||||
for (Path path : entry.getValue()) {
|
||||
|
@ -1472,8 +1492,8 @@ public final class BackupSystemTable implements Closeable {
|
|||
put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
|
||||
put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
|
||||
puts.add(put);
|
||||
LOG.debug(
|
||||
"writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
|
||||
LOG
|
||||
.debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
|
||||
}
|
||||
}
|
||||
return puts;
|
||||
|
@ -1538,7 +1558,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
* Creates Put's for bulk load resulting from running LoadIncrementalHFiles
|
||||
*/
|
||||
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
|
||||
final List<Pair<Path, Path>> pairs) {
|
||||
final List<Pair<Path, Path>> pairs) {
|
||||
List<Put> puts = new ArrayList<>(pairs.size());
|
||||
for (Pair<Path, Path> pair : pairs) {
|
||||
Path path = pair.getSecond();
|
||||
|
@ -1740,8 +1760,8 @@ public final class BackupSystemTable implements Closeable {
|
|||
*/
|
||||
static Scan createScanForBulkLoadedFiles(String backupId) {
|
||||
Scan scan = new Scan();
|
||||
byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES
|
||||
: rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
|
||||
byte[] startRow =
|
||||
backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
|
||||
byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
|
||||
stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
|
||||
scan.withStartRow(startRow);
|
||||
|
@ -1752,7 +1772,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
}
|
||||
|
||||
static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId,
|
||||
long ts, int idx) {
|
||||
long ts, int idx) {
|
||||
Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx));
|
||||
put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName());
|
||||
put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam);
|
||||
|
@ -1798,7 +1818,7 @@ public final class BackupSystemTable implements Closeable {
|
|||
|
||||
/**
|
||||
* Creates Put operation to update backup set content
|
||||
* @param name backup set's name
|
||||
* @param name backup set's name
|
||||
* @param tables list of tables
|
||||
* @return put operation
|
||||
*/
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/**
|
||||
*
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -19,7 +18,6 @@
|
|||
package org.apache.hadoop.hbase.backup.impl;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.impl;
|
||||
|
||||
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY;
|
||||
|
@ -28,7 +27,6 @@ import java.io.IOException;
|
|||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.backup.BackupCopyJob;
|
||||
import org.apache.hadoop.hbase.backup.BackupInfo;
|
||||
|
@ -48,7 +46,6 @@ import org.slf4j.LoggerFactory;
|
|||
|
||||
/**
|
||||
* Full table backup implementation
|
||||
*
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class FullTableBackupClient extends TableBackupClient {
|
||||
|
@ -58,7 +55,7 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
|
||||
public FullTableBackupClient(final Connection conn, final String backupId, BackupRequest request)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
super(conn, backupId, request);
|
||||
}
|
||||
|
||||
|
@ -117,7 +114,7 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
|
||||
|
||||
throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
|
||||
+ " with reason code " + res);
|
||||
+ " with reason code " + res);
|
||||
}
|
||||
|
||||
conf.unset(JOB_NAME_CONF_KEY);
|
||||
|
@ -127,7 +124,6 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
|
||||
/**
|
||||
* Backup request execution.
|
||||
*
|
||||
* @throws IOException if the execution of the backup fails
|
||||
*/
|
||||
@Override
|
||||
|
@ -163,9 +159,8 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
// SNAPSHOT_TABLES:
|
||||
backupInfo.setPhase(BackupPhase.SNAPSHOT);
|
||||
for (TableName tableName : tableList) {
|
||||
String snapshotName =
|
||||
"snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
|
||||
+ tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
|
||||
String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
|
||||
+ "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
|
||||
|
||||
snapshotTable(admin, tableName, snapshotName);
|
||||
backupInfo.setSnapshotName(tableName, snapshotName);
|
||||
|
@ -187,12 +182,11 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
|
||||
|
||||
Map<TableName, Map<String, Long>> newTableSetTimestampMap =
|
||||
backupManager.readLogTimestampMap();
|
||||
backupManager.readLogTimestampMap();
|
||||
|
||||
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
|
||||
Long newStartCode =
|
||||
BackupUtils.getMinValue(BackupUtils
|
||||
.getRSLogTimestampMins(newTableSetTimestampMap));
|
||||
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
|
||||
backupManager.writeBackupStartCode(newStartCode);
|
||||
|
||||
// backup complete
|
||||
|
@ -205,11 +199,9 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
|
||||
protected void snapshotTable(Admin admin, TableName tableName, String snapshotName)
|
||||
throws IOException {
|
||||
int maxAttempts =
|
||||
conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
|
||||
int pause =
|
||||
conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
|
||||
throws IOException {
|
||||
int maxAttempts = conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
|
||||
int pause = conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
|
||||
int attempts = 0;
|
||||
|
||||
while (attempts++ < maxAttempts) {
|
||||
|
@ -218,7 +210,7 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
return;
|
||||
} catch (IOException ee) {
|
||||
LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName
|
||||
+ ", sleeping for " + pause + "ms", ee);
|
||||
+ ", sleeping for " + pause + "ms", ee);
|
||||
if (attempts < maxAttempts) {
|
||||
try {
|
||||
Thread.sleep(pause);
|
||||
|
@ -229,6 +221,6 @@ public class FullTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
}
|
||||
}
|
||||
throw new IOException("Failed to snapshot table "+ tableName);
|
||||
throw new IOException("Failed to snapshot table " + tableName);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.impl;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -77,11 +76,11 @@ public class IncrementalBackupManager extends BackupManager {
|
|||
LOG.debug("StartCode " + savedStartCode + "for backupID " + backupInfo.getBackupId());
|
||||
}
|
||||
// get all new log files from .logs and .oldlogs after last TS and before new timestamp
|
||||
if (savedStartCode == null || previousTimestampMins == null
|
||||
|| previousTimestampMins.isEmpty()) {
|
||||
throw new IOException(
|
||||
"Cannot read any previous back up timestamps from backup system table. "
|
||||
+ "In order to create an incremental backup, at least one full backup is needed.");
|
||||
if (
|
||||
savedStartCode == null || previousTimestampMins == null || previousTimestampMins.isEmpty()
|
||||
) {
|
||||
throw new IOException("Cannot read any previous back up timestamps from backup system table. "
|
||||
+ "In order to create an incremental backup, at least one full backup is needed.");
|
||||
}
|
||||
|
||||
LOG.info("Execute roll log procedure for incremental backup ...");
|
||||
|
@ -103,9 +102,9 @@ public class IncrementalBackupManager extends BackupManager {
|
|||
|
||||
private List<String> excludeProcV2WALs(List<String> logList) {
|
||||
List<String> list = new ArrayList<>();
|
||||
for (int i=0; i < logList.size(); i++) {
|
||||
for (int i = 0; i < logList.size(); i++) {
|
||||
Path p = new Path(logList.get(i));
|
||||
String name = p.getName();
|
||||
String name = p.getName();
|
||||
|
||||
if (name.startsWith(WALProcedureStore.LOG_PREFIX)) {
|
||||
continue;
|
||||
|
@@ -119,18 +118,18 @@ public class IncrementalBackupManager extends BackupManager {
/**
* For each region server: get all log files newer than the last timestamps but not newer than the
* newest timestamps.
* @param olderTimestamps the timestamp for each region server of the last backup.
* @param olderTimestamps the timestamp for each region server of the last backup.
* @param newestTimestamps the timestamp for each region server that the backup should lead to.
* @param conf the Hadoop and Hbase configuration
* @param savedStartCode the startcode (timestamp) of last successful backup.
* @param conf the Hadoop and Hbase configuration
* @param savedStartCode the startcode (timestamp) of last successful backup.
* @return a list of log files to be backed up
* @throws IOException exception
*/
private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode)
throws IOException {
Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode)
throws IOException {
LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps
+ "\n newestTimestamps: " + newestTimestamps);
+ "\n newestTimestamps: " + newestTimestamps);

Path walRootDir = CommonFSUtils.getWALRootDir(conf);
Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
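
Note: the javadoc above states the selection rule for incremental backups: per region server, take WAL files newer than that server's timestamp from the previous backup, but not newer than the timestamp the new backup should lead to. A rough, self-contained sketch of that filter follows; the helper name, map shapes and file-name parsing are assumptions for illustration, not the actual method body.

// Sketch only: pick WAL files newer than each host's last-backup timestamp (hypothetical helper).
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class WalSelectionSketch {
  static List<String> newerThanLastBackup(Map<String, List<String>> walsByHost,
      Map<String, Long> olderTimestamps, Map<String, Long> newestTimestamps) {
    List<String> result = new ArrayList<>();
    for (Map.Entry<String, List<String>> e : walsByHost.entrySet()) {
      Long last = olderTimestamps.get(e.getKey());    // ts of the last backed-up log, may be null
      Long newest = newestTimestamps.get(e.getKey()); // ts the new backup should lead to
      for (String wal : e.getValue()) {
        // Assume each file name ends in ".<timestamp>" for the purpose of this sketch.
        long ts = Long.parseLong(wal.substring(wal.lastIndexOf('.') + 1));
        boolean afterLast = last == null || ts > last;
        boolean notTooNew = newest == null || ts <= newest;
        if (afterLast && notTooNew) {
          result.add(wal);
        }
      }
    }
    return result;
  }
}
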
@ -191,10 +190,10 @@ public class IncrementalBackupManager extends BackupManager {
|
|||
// or RS is down (was decommisioned). In any case, we treat this
|
||||
// log file as eligible for inclusion into incremental backup log list
|
||||
Long ts = newestTimestamps.get(host);
|
||||
if (ts == null) {
|
||||
if (ts == null) {
|
||||
LOG.warn("ORPHAN log found: " + log + " host=" + host);
|
||||
LOG.debug("Known hosts (from newestTimestamps):");
|
||||
for (String s: newestTimestamps.keySet()) {
|
||||
for (String s : newestTimestamps.keySet()) {
|
||||
LOG.debug(s);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.impl;
|
||||
|
||||
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
|
||||
|
@ -53,9 +52,7 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Incremental backup implementation.
|
||||
* See the {@link #execute() execute} method.
|
||||
*
|
||||
* Incremental backup implementation. See the {@link #execute() execute} method.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class IncrementalTableBackupClient extends TableBackupClient {
|
||||
|
@ -65,7 +62,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
|
||||
public IncrementalTableBackupClient(final Connection conn, final String backupId,
|
||||
BackupRequest request) throws IOException {
|
||||
BackupRequest request) throws IOException {
|
||||
super(conn, backupId, request);
|
||||
}
|
||||
|
||||
|
@@ -105,19 +102,19 @@ public class IncrementalTableBackupClient extends TableBackupClient {
}

/*
* Reads bulk load records from backup table, iterates through the records and forms the paths
* for bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
* Reads bulk load records from backup table, iterates through the records and forms the paths for
* bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
* @param sTableList list of tables to be backed up
* @return map of table to List of files
*/
@SuppressWarnings("unchecked")
protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList)
throws IOException {
throws IOException {
Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
List<String> activeFiles = new ArrayList<>();
List<String> archiveFiles = new ArrayList<>();
Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
backupManager.readBulkloadRows(sTableList);
backupManager.readBulkloadRows(sTableList);
Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst();
FileSystem tgtFs;
try {
@ -128,8 +125,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
Path rootdir = CommonFSUtils.getRootDir(conf);
|
||||
Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);
|
||||
|
||||
for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry :
|
||||
map.entrySet()) {
|
||||
for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry : map
|
||||
.entrySet()) {
|
||||
TableName srcTable = tblEntry.getKey();
|
||||
|
||||
int srcIdx = getIndex(srcTable, sTableList);
|
||||
|
@ -142,14 +139,14 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
|
||||
Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
|
||||
srcTable.getQualifierAsString());
|
||||
for (Map.Entry<String,Map<String,List<Pair<String, Boolean>>>> regionEntry :
|
||||
tblEntry.getValue().entrySet()){
|
||||
srcTable.getQualifierAsString());
|
||||
for (Map.Entry<String, Map<String, List<Pair<String, Boolean>>>> regionEntry : tblEntry
|
||||
.getValue().entrySet()) {
|
||||
String regionName = regionEntry.getKey();
|
||||
Path regionDir = new Path(tblDir, regionName);
|
||||
// map from family to List of hfiles
|
||||
for (Map.Entry<String,List<Pair<String, Boolean>>> famEntry :
|
||||
regionEntry.getValue().entrySet()) {
|
||||
for (Map.Entry<String, List<Pair<String, Boolean>>> famEntry : regionEntry.getValue()
|
||||
.entrySet()) {
|
||||
String fam = famEntry.getKey();
|
||||
Path famDir = new Path(regionDir, fam);
|
||||
List<Path> files;
|
||||
|
@ -170,7 +167,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
int idx = file.lastIndexOf("/");
|
||||
String filename = file;
|
||||
if (idx > 0) {
|
||||
filename = file.substring(idx+1);
|
||||
filename = file.substring(idx + 1);
|
||||
}
|
||||
Path p = new Path(famDir, filename);
|
||||
Path tgt = new Path(tgtFam, filename);
|
||||
|
@ -183,7 +180,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
LOG.trace("copying " + p + " to " + tgt);
|
||||
}
|
||||
activeFiles.add(p.toString());
|
||||
} else if (fs.exists(archive)){
|
||||
} else if (fs.exists(archive)) {
|
||||
LOG.debug("copying archive " + archive + " to " + tgt);
|
||||
archiveFiles.add(archive.toString());
|
||||
}
|
||||
|
@ -199,7 +196,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
|
||||
private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
try {
|
||||
// Enable special mode of BackupDistCp
|
||||
conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5);
|
||||
|
@ -207,8 +204,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId();
|
||||
int attempt = 1;
|
||||
while (activeFiles.size() > 0) {
|
||||
LOG.info("Copy "+ activeFiles.size() +
|
||||
" active bulk loaded files. Attempt ="+ (attempt++));
|
||||
LOG.info(
|
||||
"Copy " + activeFiles.size() + " active bulk loaded files. Attempt =" + (attempt++));
|
||||
String[] toCopy = new String[activeFiles.size()];
|
||||
activeFiles.toArray(toCopy);
|
||||
// Active file can be archived during copy operation,
|
||||
|
@ -245,7 +242,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
}
|
||||
|
||||
private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
List<String> newlyArchived = new ArrayList<>();
|
||||
|
||||
for (String spath : activeFiles) {
|
||||
|
@ -269,9 +266,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
beginBackup(backupManager, backupInfo);
|
||||
backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
|
||||
LOG.debug("For incremental backup, current table set is "
|
||||
+ backupManager.getIncrementalBackupTableSet());
|
||||
newTimestamps =
|
||||
((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
|
||||
+ backupManager.getIncrementalBackupTableSet());
|
||||
newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
|
||||
} catch (Exception e) {
|
||||
// fail the overall backup and return
|
||||
failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
|
||||
|
@ -285,8 +281,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
|
||||
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
|
||||
convertWALsToHFiles();
|
||||
incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
|
||||
backupInfo.getBackupRootDir());
|
||||
incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
|
||||
backupInfo.getBackupRootDir());
|
||||
} catch (Exception e) {
|
||||
String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId;
|
||||
// fail the overall backup and return
|
||||
|
@ -298,8 +294,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
// After this checkpoint, even if entering cancel process, will let the backup finished
|
||||
try {
|
||||
// Set the previousTimestampMap which is before this current log roll to the manifest.
|
||||
Map<TableName, Map<String, Long>> previousTimestampMap =
|
||||
backupManager.readLogTimestampMap();
|
||||
Map<TableName, Map<String, Long>> previousTimestampMap = backupManager.readLogTimestampMap();
|
||||
backupInfo.setIncrTimestampMap(previousTimestampMap);
|
||||
|
||||
// The table list in backupInfo is good for both full backup and incremental backup.
|
||||
|
@ -307,11 +302,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
|
||||
|
||||
Map<TableName, Map<String, Long>> newTableSetTimestampMap =
|
||||
backupManager.readLogTimestampMap();
|
||||
backupManager.readLogTimestampMap();
|
||||
|
||||
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
|
||||
Long newStartCode =
|
||||
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
|
||||
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
|
||||
backupManager.writeBackupStartCode(newStartCode);
|
||||
|
||||
handleBulkLoad(backupInfo.getTableNames());
|
||||
|
@ -345,11 +340,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr);
|
||||
if (res != 0) {
|
||||
LOG.error("Copy incremental HFile files failed with return code: " + res + ".");
|
||||
throw new IOException("Failed copy from " + StringUtils.join(files, ',')
|
||||
+ " to " + backupDest);
|
||||
throw new IOException(
|
||||
"Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest);
|
||||
}
|
||||
LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',')
|
||||
+ " to " + backupDest + " finished.");
|
||||
LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest
|
||||
+ " finished.");
|
||||
} finally {
|
||||
deleteBulkLoadDirectory();
|
||||
}
|
||||
|
@ -398,7 +393,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
// a Map task for each file. We use ';' as separator
|
||||
// because WAL file names contains ','
|
||||
String dirs = StringUtils.join(dirPaths, ';');
|
||||
String jobname = "Incremental_Backup-" + backupId ;
|
||||
String jobname = "Incremental_Backup-" + backupId;
|
||||
|
||||
Path bulkOutputPath = getBulkOutputDir();
|
||||
conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
|
||||
|
@ -410,7 +405,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
try {
|
||||
player.setConf(conf);
|
||||
int result = player.run(playerArgs);
|
||||
if(result != 0) {
|
||||
if (result != 0) {
|
||||
throw new IOException("WAL Player failed");
|
||||
}
|
||||
conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
|
||||
|
@ -419,7 +414,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
|
|||
throw e;
|
||||
} catch (Exception ee) {
|
||||
throw new IOException("Can not convert from directory " + dirs
|
||||
+ " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
|
||||
+ " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.backup.impl;

import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
@ -25,7 +24,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.TreeSet;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@ -47,7 +45,6 @@ import org.slf4j.LoggerFactory;

/**
* Restore table implementation
*
*/
@InterfaceAudience.Private
public class RestoreTablesClient {
@ -76,7 +73,6 @@ public class RestoreTablesClient {
|
|||
|
||||
/**
|
||||
* Validate target tables.
|
||||
*
|
||||
* @param tTableArray target tables
|
||||
* @param isOverwrite overwrite existing table
|
||||
* @throws IOException exception
|
||||
|
@ -95,26 +91,25 @@ public class RestoreTablesClient {
|
|||
}
|
||||
} else {
|
||||
LOG.info("HBase table " + tableName
|
||||
+ " does not exist. It will be created during restore process");
|
||||
+ " does not exist. It will be created during restore process");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (existTableList.size() > 0) {
|
||||
if (!isOverwrite) {
|
||||
LOG.error("Existing table (" + existTableList
|
||||
+ ") found in the restore target, please add "
|
||||
+ "\"-o\" as overwrite option in the command if you mean"
|
||||
+ " to restore to these existing tables");
|
||||
throw new IOException("Existing table found in target while no \"-o\" "
|
||||
+ "as overwrite option found");
|
||||
LOG.error("Existing table (" + existTableList + ") found in the restore target, please add "
|
||||
+ "\"-o\" as overwrite option in the command if you mean"
|
||||
+ " to restore to these existing tables");
|
||||
throw new IOException(
|
||||
"Existing table found in target while no \"-o\" " + "as overwrite option found");
|
||||
} else {
|
||||
if (disabledTableList.size() > 0) {
|
||||
LOG.error("Found offline table in the restore target, "
|
||||
+ "please enable them before restore with \"-overwrite\" option");
|
||||
+ "please enable them before restore with \"-overwrite\" option");
|
||||
LOG.info("Offline table list in restore target: " + disabledTableList);
|
||||
throw new IOException(
|
||||
"Found offline table in the target when restore with \"-overwrite\" option");
|
||||
"Found offline table in the target when restore with \"-overwrite\" option");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -122,16 +117,15 @@ public class RestoreTablesClient {
|
|||
|
||||
/**
|
||||
* Restore operation handle each backupImage in array.
|
||||
*
|
||||
* @param images array BackupImage
|
||||
* @param sTable table to be restored
|
||||
* @param tTable table to be restored to
|
||||
* @param images array BackupImage
|
||||
* @param sTable table to be restored
|
||||
* @param tTable table to be restored to
|
||||
* @param truncateIfExists truncate table
|
||||
* @throws IOException exception
|
||||
*/
|
||||
|
||||
private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable,
|
||||
boolean truncateIfExists) throws IOException {
|
||||
boolean truncateIfExists) throws IOException {
|
||||
// First image MUST be image of a FULL backup
|
||||
BackupImage image = images[0];
|
||||
String rootDir = image.getRootDir();
|
||||
|
@ -144,7 +138,7 @@ public class RestoreTablesClient {
|
|||
BackupManifest manifest = HBackupFileSystem.getManifest(conf, backupRoot, backupId);
|
||||
if (manifest.getType() == BackupType.FULL) {
|
||||
LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image "
|
||||
+ tableBackupPath.toString());
|
||||
+ tableBackupPath.toString());
|
||||
conf.set(JOB_NAME_CONF_KEY, "Full_Restore-" + backupId + "-" + tTable);
|
||||
restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists,
|
||||
lastIncrBackupId);
|
||||
|
@ -164,7 +158,7 @@ public class RestoreTablesClient {
|
|||
for (int i = 1; i < images.length; i++) {
|
||||
BackupImage im = images[i];
|
||||
String fileBackupDir =
|
||||
HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable);
|
||||
HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable);
|
||||
List<Path> list = getFilesRecursively(fileBackupDir);
|
||||
dirList.addAll(list);
|
||||
|
||||
|
@ -186,7 +180,7 @@ public class RestoreTablesClient {
|
|||
}
|
||||
|
||||
private List<Path> getFilesRecursively(String fileBackupDir)
|
||||
throws IllegalArgumentException, IOException {
|
||||
throws IllegalArgumentException, IOException {
|
||||
FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration());
|
||||
List<Path> list = new ArrayList<>();
|
||||
RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true);
|
||||
|
@ -202,12 +196,12 @@ public class RestoreTablesClient {
|
|||
/**
|
||||
* Restore operation. Stage 2: resolved Backup Image dependency
|
||||
* @param backupManifestMap : tableName, Manifest
|
||||
* @param sTableArray The array of tables to be restored
|
||||
* @param tTableArray The array of mapping tables to restore to
|
||||
* @param sTableArray The array of tables to be restored
|
||||
* @param tTableArray The array of mapping tables to restore to
|
||||
* @throws IOException exception
|
||||
*/
|
||||
private void restore(HashMap<TableName, BackupManifest> backupManifestMap,
|
||||
TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
|
||||
TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
|
||||
TreeSet<BackupImage> restoreImageSet = new TreeSet<>();
|
||||
|
||||
for (int i = 0; i < sTableArray.length; i++) {
|
||||
|
@ -229,8 +223,7 @@ public class RestoreTablesClient {
|
|||
LOG.info("Restore includes the following image(s):");
|
||||
for (BackupImage image : restoreImageSet) {
|
||||
LOG.info("Backup: " + image.getBackupId() + " "
|
||||
+ HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
|
||||
table));
|
||||
+ HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -44,10 +44,9 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Base class for backup operation. Concrete implementation for
|
||||
* full and incremental backup are delegated to corresponding sub-classes:
|
||||
* {@link FullTableBackupClient} and {@link IncrementalTableBackupClient}
|
||||
*
|
||||
* Base class for backup operation. Concrete implementation for full and incremental backup are
|
||||
* delegated to corresponding sub-classes: {@link FullTableBackupClient} and
|
||||
* {@link IncrementalTableBackupClient}
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public abstract class TableBackupClient {
|
||||
|
@ -72,12 +71,12 @@ public abstract class TableBackupClient {
|
|||
}
|
||||
|
||||
public TableBackupClient(final Connection conn, final String backupId, BackupRequest request)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
init(conn, backupId, request);
|
||||
}
|
||||
|
||||
public void init(final Connection conn, final String backupId, BackupRequest request)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
if (request.getBackupType() == BackupType.FULL) {
|
||||
backupManager = new BackupManager(conn, conn.getConfiguration());
|
||||
} else {
|
||||
|
@ -88,9 +87,8 @@ public abstract class TableBackupClient {
|
|||
this.conn = conn;
|
||||
this.conf = conn.getConfiguration();
|
||||
this.fs = CommonFSUtils.getCurrentFileSystem(conf);
|
||||
backupInfo =
|
||||
backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
|
||||
request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
|
||||
backupInfo = backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
|
||||
request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
|
||||
if (tableList == null || tableList.isEmpty()) {
|
||||
this.tableList = new ArrayList<>(backupInfo.getTables());
|
||||
}
|
||||
|
@ -104,7 +102,7 @@ public abstract class TableBackupClient {
|
|||
* @throws IOException exception
|
||||
*/
|
||||
protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
|
||||
BackupSystemTable.snapshot(conn);
|
||||
backupManager.setBackupInfo(backupInfo);
|
||||
|
@ -136,7 +134,7 @@ public abstract class TableBackupClient {
|
|||
* @throws IOException exception
|
||||
*/
|
||||
protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo,
|
||||
Configuration conf) throws IOException {
|
||||
Configuration conf) throws IOException {
|
||||
LOG.debug("Trying to delete snapshot for full backup.");
|
||||
for (String snapshotName : backupInfo.getSnapshotNames()) {
|
||||
if (snapshotName == null) {
|
||||
|
@ -148,7 +146,7 @@ public abstract class TableBackupClient {
|
|||
admin.deleteSnapshot(snapshotName);
|
||||
}
|
||||
LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId()
|
||||
+ " succeeded.");
|
||||
+ " succeeded.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -159,9 +157,8 @@ public abstract class TableBackupClient {
|
|||
*/
|
||||
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
|
||||
FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf);
|
||||
Path stagingDir =
|
||||
new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
|
||||
.toString()));
|
||||
Path stagingDir = new Path(
|
||||
conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory().toString()));
|
||||
FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir);
|
||||
if (files == null) {
|
||||
return;
|
||||
|
@ -177,30 +174,29 @@ public abstract class TableBackupClient {
|
|||
}
|
||||
|
||||
/**
|
||||
* Clean up the uncompleted data at target directory if the ongoing backup has already entered
|
||||
* the copy phase.
|
||||
* Clean up the uncompleted data at target directory if the ongoing backup has already entered the
|
||||
* copy phase.
|
||||
*/
|
||||
protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
|
||||
try {
|
||||
// clean up the uncompleted data at target directory if the ongoing backup has already entered
|
||||
// the copy phase
|
||||
LOG.debug("Trying to cleanup up target dir. Current backup phase: "
|
||||
+ backupInfo.getPhase());
|
||||
if (backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
|
||||
LOG.debug("Trying to cleanup up target dir. Current backup phase: " + backupInfo.getPhase());
|
||||
if (
|
||||
backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
|
||||
|| backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
|
||||
|| backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
|
||||
FileSystem outputFs =
|
||||
FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
|
||||
|| backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)
|
||||
) {
|
||||
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
|
||||
|
||||
// now treat one backup as a transaction, clean up data that has been partially copied at
|
||||
// table level
|
||||
for (TableName table : backupInfo.getTables()) {
|
||||
Path targetDirPath =
|
||||
new Path(HBackupFileSystem.getTableBackupDir(backupInfo.getBackupRootDir(),
|
||||
backupInfo.getBackupId(), table));
|
||||
Path targetDirPath = new Path(HBackupFileSystem
|
||||
.getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
|
||||
if (outputFs.delete(targetDirPath, true)) {
|
||||
LOG.debug("Cleaning up uncompleted backup data at " + targetDirPath.toString()
|
||||
+ " done.");
|
||||
LOG.debug(
|
||||
"Cleaning up uncompleted backup data at " + targetDirPath.toString() + " done.");
|
||||
} else {
|
||||
LOG.debug("No data has been copied to " + targetDirPath.toString() + ".");
|
||||
}
|
||||
|
@ -216,18 +212,18 @@ public abstract class TableBackupClient {
|
|||
|
||||
} catch (IOException e1) {
|
||||
LOG.error("Cleaning up uncompleted backup data of " + backupInfo.getBackupId() + " at "
|
||||
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
|
||||
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fail the overall backup.
|
||||
* @param backupInfo backup info
|
||||
* @param e exception
|
||||
* @param e exception
|
||||
* @throws IOException exception
|
||||
*/
|
||||
protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager,
|
||||
Exception e, String msg, BackupType type, Configuration conf) throws IOException {
|
||||
Exception e, String msg, BackupType type, Configuration conf) throws IOException {
|
||||
try {
|
||||
LOG.error(msg + getMessage(e), e);
|
||||
// If this is a cancel exception, then we've already cleaned.
|
||||
|
@ -238,10 +234,9 @@ public abstract class TableBackupClient {
|
|||
// set overall backup status: failed
|
||||
backupInfo.setState(BackupState.FAILED);
|
||||
// compose the backup failed data
|
||||
String backupFailedData =
|
||||
"BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs()
|
||||
+ ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + backupInfo.getPhase()
|
||||
+ ",failedmessage=" + backupInfo.getFailedMsg();
|
||||
String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
|
||||
+ backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
|
||||
+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
|
||||
LOG.error(backupFailedData);
|
||||
cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
|
||||
// If backup session is updated to FAILED state - means we
|
||||
|
@ -256,7 +251,7 @@ public abstract class TableBackupClient {
|
|||
}
|
||||
|
||||
public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo backupInfo,
|
||||
Configuration conf) throws IOException {
|
||||
Configuration conf) throws IOException {
|
||||
BackupType type = backupInfo.getType();
|
||||
// if full backup, then delete HBase snapshots if there already are snapshots taken
|
||||
// and also clean up export snapshot log files if exist
|
||||
|
@ -278,7 +273,7 @@ public abstract class TableBackupClient {
|
|||
* @throws IOException exception
|
||||
*/
|
||||
protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type,
|
||||
Configuration conf) throws IOException {
|
||||
Configuration conf) throws IOException {
|
||||
// set the overall backup phase : store manifest
|
||||
backupInfo.setPhase(BackupPhase.STORE_MANIFEST);
|
||||
|
||||
|
@ -365,7 +360,7 @@ public abstract class TableBackupClient {
|
|||
* @throws IOException exception
|
||||
*/
|
||||
protected void completeBackup(final Connection conn, BackupInfo backupInfo,
|
||||
BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
|
||||
BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
|
||||
// set the complete timestamp of the overall backup
|
||||
backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime());
|
||||
// set overall backup status: complete
|
||||
|
@ -376,9 +371,8 @@ public abstract class TableBackupClient {
|
|||
|
||||
// compose the backup complete data
|
||||
String backupCompleteData =
|
||||
obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs()
|
||||
+ ",completets=" + backupInfo.getCompleteTs() + ",bytescopied="
|
||||
+ backupInfo.getTotalBytesCopied();
|
||||
obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() + ",completets="
|
||||
+ backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getTotalBytesCopied();
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData);
|
||||
}
|
||||
|
@ -404,23 +398,26 @@ public abstract class TableBackupClient {

/**
* Backup request execution.
*
* @throws IOException if the execution of the backup fails
*/
public abstract void execute() throws IOException;

protected Stage getTestStage() {
return Stage.valueOf("stage_"+ conf.getInt(BACKUP_TEST_MODE_STAGE, 0));
return Stage.valueOf("stage_" + conf.getInt(BACKUP_TEST_MODE_STAGE, 0));
}

protected void failStageIf(Stage stage) throws IOException {
Stage current = getTestStage();
if (current == stage) {
throw new IOException("Failed stage " + stage+" in testing");
throw new IOException("Failed stage " + stage + " in testing");
}
}

public enum Stage {
stage_0, stage_1, stage_2, stage_3, stage_4
stage_0,
stage_1,
stage_2,
stage_3,
stage_4
}
}
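The Stage/failStageIf code above implements stage-based failure injection for tests: a configured stage index picks the checkpoint at which a run aborts. A minimal sketch of the same pattern, with invented names and assuming the stage index comes from plain configuration rather than the BACKUP_TEST_MODE_STAGE key:

import java.io.IOException;

// Hypothetical illustration of stage-based failure injection, not the project's code.
final class StageFailureSketch {
  enum Stage { stage_0, stage_1, stage_2, stage_3, stage_4 }

  private final int configuredStage;

  StageFailureSketch(int configuredStage) {
    this.configuredStage = configuredStage;
  }

  private Stage testStage() {
    return Stage.valueOf("stage_" + configuredStage);
  }

  void failStageIf(Stage stage) throws IOException {
    if (testStage() == stage) {
      throw new IOException("Failed stage " + stage + " in testing");
    }
  }

  // A flow calls failStageIf() at checkpoints so a test can make it fail deterministically.
  void runFlow() throws IOException {
    failStageIf(Stage.stage_1); // after preparation
    failStageIf(Stage.stage_2); // after copy
    failStageIf(Stage.stage_3); // before finalization
  }
}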
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -25,7 +25,6 @@ import java.math.BigDecimal;
|
|||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -125,29 +124,27 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {

/**
* Update the ongoing backup with new progress.
* @param backupInfo backup info
* @param backupInfo backup info
* @param newProgress progress
* @param bytesCopied bytes copied
* @throws NoNodeException exception
*/
static void updateProgress(BackupInfo backupInfo, BackupManager backupManager,
int newProgress, long bytesCopied) throws IOException {
static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress,
long bytesCopied) throws IOException {
// compose the new backup progress data, using fake number for now
String backupProgressData = newProgress + "%";

backupInfo.setProgress(newProgress);
backupManager.updateBackupInfo(backupInfo);
LOG.debug("Backup progress data \"" + backupProgressData
+ "\" has been updated to backup system table for " + backupInfo.getBackupId());
+ "\" has been updated to backup system table for " + backupInfo.getBackupId());
}

/**
* Extends DistCp for progress updating to backup system table
* during backup. Using DistCpV2 (MAPREDUCE-2765).
* Simply extend it and override execute() method to get the
* Job reference for progress updating.
* Only the argument "src1, [src2, [...]] dst" is supported,
* no more DistCp options.
* Extends DistCp for progress updating to backup system table during backup. Using DistCpV2
* (MAPREDUCE-2765). Simply extend it and override execute() method to get the Job reference for
* progress updating. Only the argument "src1, [src2, [...]] dst" is supported, no more DistCp
* options.
*/
class BackupDistCp extends DistCp {
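As the javadoc above says, BackupDistCp overrides execute() to hold the Job reference and push map progress into the backup system table on a fixed interval (the hbase.backup.progressreport.frequency setting, default 500 ms). A minimal, non-authoritative sketch of that polling idea, using invented names instead of the Hadoop Job API:

import java.util.function.Supplier;

// Hypothetical sketch of the progress-polling loop; sample map progress on an interval
// and push an update whenever it increases.
final class ProgressPollerSketch {
  interface ProgressSink {
    void update(int percent);
  }

  static void poll(Supplier<Boolean> jobComplete, Supplier<Float> mapProgress, ProgressSink sink,
      long reportFreqMillis) throws InterruptedException {
    float last = -1f;
    while (!jobComplete.get()) {
      float current = mapProgress.get(); // 0.0f .. 1.0f
      if (current > last) {
        sink.update(Math.round(current * 100));
        last = current;
      }
      Thread.sleep(reportFreqMillis); // assumed report frequency, e.g. 500 ms
    }
    sink.update(Math.round(mapProgress.get() * 100)); // final update after completion
  }
}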
@ -156,14 +153,12 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
private BackupManager backupManager;
|
||||
|
||||
public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backupInfo,
|
||||
BackupManager backupManager) throws Exception {
|
||||
BackupManager backupManager) throws Exception {
|
||||
super(conf, options);
|
||||
this.backupInfo = backupInfo;
|
||||
this.backupManager = backupManager;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public Job execute() throws Exception {
|
||||
|
||||
|
@ -188,43 +183,41 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
|
||||
long totalSrcLgth = 0;
|
||||
for (Path aSrc : srcs) {
|
||||
totalSrcLgth +=
|
||||
BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
|
||||
totalSrcLgth += BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc);
|
||||
}
|
||||
|
||||
// Async call
|
||||
job = super.execute();
|
||||
// Update the copy progress to system table every 0.5s if progress value changed
|
||||
int progressReportFreq =
|
||||
MapReduceBackupCopyJob.this.getConf().getInt("hbase.backup.progressreport.frequency",
|
||||
500);
|
||||
int progressReportFreq = MapReduceBackupCopyJob.this.getConf()
|
||||
.getInt("hbase.backup.progressreport.frequency", 500);
|
||||
float lastProgress = progressDone;
|
||||
while (!job.isComplete()) {
|
||||
float newProgress =
|
||||
progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS);
|
||||
progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS);
|
||||
|
||||
if (newProgress > lastProgress) {
|
||||
|
||||
BigDecimal progressData =
|
||||
new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
|
||||
new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
|
||||
String newProgressStr = progressData + "%";
|
||||
LOG.info("Progress: " + newProgressStr);
|
||||
updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied);
|
||||
LOG.debug("Backup progress data updated to backup system table: \"Progress: "
|
||||
+ newProgressStr + ".\"");
|
||||
+ newProgressStr + ".\"");
|
||||
lastProgress = newProgress;
|
||||
}
|
||||
Thread.sleep(progressReportFreq);
|
||||
}
|
||||
// update the progress data after copy job complete
|
||||
float newProgress =
|
||||
progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS);
|
||||
progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS);
|
||||
BigDecimal progressData =
|
||||
new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
|
||||
new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
|
||||
|
||||
String newProgressStr = progressData + "%";
|
||||
LOG.info("Progress: " + newProgressStr + " subTask: " + subTaskPercntgInWholeTask
|
||||
+ " mapProgress: " + job.mapProgress());
|
||||
+ " mapProgress: " + job.mapProgress());
|
||||
|
||||
// accumulate the overall backup progress
|
||||
progressDone = newProgress;
|
||||
|
@ -232,7 +225,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
|
||||
updateProgress(backupInfo, backupManager, progressData.intValue(), bytesCopied);
|
||||
LOG.debug("Backup progress data updated to backup system table: \"Progress: "
|
||||
+ newProgressStr + " - " + bytesCopied + " bytes copied.\"");
|
||||
+ newProgressStr + " - " + bytesCopied + " bytes copied.\"");
|
||||
} catch (Throwable t) {
|
||||
LOG.error(t.toString(), t);
|
||||
throw t;
|
||||
|
@ -241,8 +234,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
String jobID = job.getJobID().toString();
|
||||
job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
|
||||
|
||||
LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " "
|
||||
+ job.isSuccessful());
|
||||
LOG.debug(
|
||||
"DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " + job.isSuccessful());
|
||||
Counters ctrs = job.getCounters();
|
||||
LOG.debug(Objects.toString(ctrs));
|
||||
if (job.isComplete() && !job.isSuccessful()) {
|
||||
|
@ -252,11 +245,11 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
return job;
|
||||
}
|
||||
|
||||
private Field getInputOptionsField(Class<?> classDistCp) throws IOException{
|
||||
private Field getInputOptionsField(Class<?> classDistCp) throws IOException {
|
||||
Field f = null;
|
||||
try {
|
||||
f = classDistCp.getDeclaredField("inputOptions");
|
||||
} catch(Exception e) {
|
||||
} catch (Exception e) {
|
||||
// Haddop 3
|
||||
try {
|
||||
f = classDistCp.getDeclaredField("context");
|
||||
|
@ -268,7 +261,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private List<Path> getSourcePaths(Field fieldInputOptions) throws IOException{
|
||||
private List<Path> getSourcePaths(Field fieldInputOptions) throws IOException {
|
||||
Object options;
|
||||
try {
|
||||
options = fieldInputOptions.get(this);
|
||||
|
@ -282,9 +275,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
|
||||
return (List<Path>) methodGetSourcePaths.invoke(options);
|
||||
}
|
||||
} catch (IllegalArgumentException | IllegalAccessException |
|
||||
ClassNotFoundException | NoSuchMethodException |
|
||||
SecurityException | InvocationTargetException e) {
|
||||
} catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException
|
||||
| NoSuchMethodException | SecurityException | InvocationTargetException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
|
@ -321,8 +313,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
cfg.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, fileListingPath.toString());
|
||||
cfg.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, totalRecords);
|
||||
} catch (NoSuchFieldException | SecurityException | IllegalArgumentException
|
||||
| IllegalAccessException | NoSuchMethodException | ClassNotFoundException
|
||||
| InvocationTargetException e) {
|
||||
| IllegalAccessException | NoSuchMethodException | ClassNotFoundException
|
||||
| InvocationTargetException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return fileListingPath;
|
||||
|
@ -340,8 +332,8 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
}
|
||||
|
||||
private List<Path> getSourceFiles() throws NoSuchFieldException, SecurityException,
|
||||
IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
|
||||
ClassNotFoundException, InvocationTargetException, IOException {
|
||||
IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
|
||||
ClassNotFoundException, InvocationTargetException, IOException {
|
||||
Field options = null;
|
||||
try {
|
||||
options = DistCp.class.getDeclaredField("inputOptions");
|
||||
|
@ -352,8 +344,6 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
return getSourcePaths(options);
|
||||
}
|
||||
|
||||
|
||||
|
||||
private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException {
|
||||
FileSystem fs = pathToListFile.getFileSystem(conf);
|
||||
fs.delete(pathToListFile, false);
|
||||
|
@ -367,15 +357,15 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
|
||||
/**
|
||||
* Do backup copy based on different types.
|
||||
* @param context The backup info
|
||||
* @param conf The hadoop configuration
|
||||
* @param context The backup info
|
||||
* @param conf The hadoop configuration
|
||||
* @param copyType The backup copy type
|
||||
* @param options Options for customized ExportSnapshot or DistCp
|
||||
* @param options Options for customized ExportSnapshot or DistCp
|
||||
* @throws Exception exception
|
||||
*/
|
||||
@Override
|
||||
public int copy(BackupInfo context, BackupManager backupManager, Configuration conf,
|
||||
BackupType copyType, String[] options) throws IOException {
|
||||
BackupType copyType, String[] options) throws IOException {
|
||||
int res = 0;
|
||||
|
||||
try {
|
||||
|
@ -391,7 +381,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
|
|||
setSubTaskPercntgInWholeTask(1f);
|
||||
|
||||
BackupDistCp distcp =
|
||||
new BackupDistCp(new Configuration(conf), null, context, backupManager);
|
||||
new BackupDistCp(new Configuration(conf), null, context, backupManager);
|
||||
// Handle a special case where the source file is a single file.
|
||||
// In this case, distcp will not create the target dir. It just take the
|
||||
// target as a file name and copy source file to the target (as a file name).
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.hbase.backup.mapreduce;
|
||||
|
||||
import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
@ -52,9 +53,8 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* MapReduce implementation of {@link BackupMergeJob}
|
||||
* Must be initialized with configuration of a backup destination cluster
|
||||
*
|
||||
* MapReduce implementation of {@link BackupMergeJob} Must be initialized with configuration of a
|
||||
* backup destination cluster
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class MapReduceBackupMergeJob implements BackupMergeJob {
|
||||
|
@ -119,9 +119,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
|
||||
String dirs = StringUtils.join(dirPaths, ",");
|
||||
|
||||
Path bulkOutputPath =
|
||||
BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]),
|
||||
getConf(), false);
|
||||
Path bulkOutputPath = BackupUtils.getBulkOutputDir(
|
||||
BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
|
||||
// Delete content if exists
|
||||
if (fs.exists(bulkOutputPath)) {
|
||||
if (!fs.delete(bulkOutputPath, true)) {
|
||||
|
@ -136,7 +135,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
int result = player.run(playerArgs);
|
||||
if (!succeeded(result)) {
|
||||
throw new IOException("Can not merge backup images for " + dirs
|
||||
+ " (check Hadoop/MR and HBase logs). Player return code =" + result);
|
||||
+ " (check Hadoop/MR and HBase logs). Player return code =" + result);
|
||||
}
|
||||
// Add to processed table list
|
||||
processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
|
||||
|
@ -149,14 +148,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
// PHASE 2 (modification of a backup file system)
|
||||
// Move existing mergedBackupId data into tmp directory
|
||||
// we will need it later in case of a failure
|
||||
Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot,
|
||||
mergedBackupId);
|
||||
Path tmpBackupDir =
|
||||
HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId);
|
||||
Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId);
|
||||
|
||||
if (!fs.rename(backupDirPath, tmpBackupDir)) {
|
||||
throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir);
|
||||
throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir);
|
||||
} else {
|
||||
LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir);
|
||||
LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir);
|
||||
}
|
||||
// Move new data into backup dest
|
||||
for (Pair<TableName, Path> tn : processedTableList) {
|
||||
|
@ -170,7 +169,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
// Delete tmp dir (Rename back during repair)
|
||||
if (!fs.delete(tmpBackupDir, true)) {
|
||||
// WARN and ignore
|
||||
LOG.warn("Could not delete tmp dir: "+ tmpBackupDir);
|
||||
LOG.warn("Could not delete tmp dir: " + tmpBackupDir);
|
||||
}
|
||||
// Delete old data
|
||||
deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
|
||||
|
@ -193,8 +192,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
} else {
|
||||
// backup repair must be run
|
||||
throw new IOException(
|
||||
"Backup merge operation failed, run backup repair tool to restore system's integrity",
|
||||
e);
|
||||
"Backup merge operation failed, run backup repair tool to restore system's integrity", e);
|
||||
}
|
||||
} finally {
|
||||
table.close();
|
||||
|
@ -204,13 +202,13 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
|
||||
/**
|
||||
* Copy meta data to of a backup session
|
||||
* @param fs file system
|
||||
* @param tmpBackupDir temp backup directory, where meta is locaed
|
||||
* @param fs file system
|
||||
* @param tmpBackupDir temp backup directory, where meta is locaed
|
||||
* @param backupDirPath new path for backup
|
||||
* @throws IOException exception
|
||||
*/
|
||||
protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
RemoteIterator<LocatedFileStatus> it = fs.listFiles(tmpBackupDir, true);
|
||||
List<Path> toKeep = new ArrayList<Path>();
|
||||
while (it.hasNext()) {
|
||||
|
@ -220,8 +218,10 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
}
|
||||
// Keep meta
|
||||
String fileName = p.toString();
|
||||
if (fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0
|
||||
|| fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0) {
|
||||
if (
|
||||
fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0
|
||||
|| fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0
|
||||
) {
|
||||
toKeep.add(p);
|
||||
}
|
||||
}
|
||||
|
@ -234,8 +234,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
|
||||
/**
|
||||
* Copy file in DFS from p to newPath
|
||||
* @param fs file system
|
||||
* @param p old path
|
||||
* @param fs file system
|
||||
* @param p old path
|
||||
* @param newPath new path
|
||||
* @throws IOException exception
|
||||
*/
|
||||
|
@ -249,12 +249,12 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts path before copying
|
||||
* @param p path
|
||||
* @param backupDirPath backup root
|
||||
* @return converted path
|
||||
*/
|
||||
/**
|
||||
* Converts path before copying
|
||||
* @param p path
|
||||
* @param backupDirPath backup root
|
||||
* @return converted path
|
||||
*/
|
||||
protected Path convertToDest(Path p, Path backupDirPath) {
|
||||
String backupId = backupDirPath.getName();
|
||||
Stack<String> stack = new Stack<String>();
|
||||
|
@ -300,16 +300,16 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
}
|
||||
|
||||
protected void updateBackupManifest(String backupRoot, String mergedBackupId,
|
||||
List<String> backupsToDelete) throws IllegalArgumentException, IOException {
|
||||
List<String> backupsToDelete) throws IllegalArgumentException, IOException {
|
||||
BackupManifest manifest =
|
||||
HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId);
|
||||
HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId);
|
||||
manifest.getBackupImage().removeAncestors(backupsToDelete);
|
||||
// save back
|
||||
manifest.store(conf);
|
||||
}
|
||||
|
||||
protected void deleteBackupImages(List<String> backupIds, Connection conn, FileSystem fs,
|
||||
String backupRoot) throws IOException {
|
||||
String backupRoot) throws IOException {
|
||||
// Delete from backup system table
|
||||
try (BackupSystemTable table = new BackupSystemTable(conn)) {
|
||||
for (String backupId : backupIds) {
|
||||
|
@ -339,24 +339,24 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
}
|
||||
|
||||
protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath,
|
||||
TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException {
|
||||
TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException {
|
||||
Path dest =
|
||||
new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName));
|
||||
new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName));
|
||||
|
||||
FileStatus[] fsts = fs.listStatus(bulkOutputPath);
|
||||
for (FileStatus fst : fsts) {
|
||||
if (fst.isDirectory()) {
|
||||
String family = fst.getPath().getName();
|
||||
String family = fst.getPath().getName();
|
||||
Path newDst = new Path(dest, family);
|
||||
if (fs.exists(newDst)) {
|
||||
if (!fs.delete(newDst, true)) {
|
||||
throw new IOException("failed to delete :"+ newDst);
|
||||
throw new IOException("failed to delete :" + newDst);
|
||||
}
|
||||
} else {
|
||||
fs.mkdirs(dest);
|
||||
}
|
||||
boolean result = fs.rename(fst.getPath(), dest);
|
||||
LOG.debug("MoveData from "+ fst.getPath() +" to "+ dest+" result="+ result);
|
||||
LOG.debug("MoveData from " + fst.getPath() + " to " + dest + " result=" + result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -365,7 +365,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
Set<TableName> allSet = new HashSet<>();
|
||||
|
||||
try (Connection conn = ConnectionFactory.createConnection(conf);
|
||||
BackupSystemTable table = new BackupSystemTable(conn)) {
|
||||
BackupSystemTable table = new BackupSystemTable(conn)) {
|
||||
for (String backupId : backupIds) {
|
||||
BackupInfo bInfo = table.readBackupInfo(backupId);
|
||||
|
||||
|
@ -378,12 +378,12 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
|
|||
}
|
||||
|
||||
protected Path[] findInputDirectories(FileSystem fs, String backupRoot, TableName tableName,
|
||||
String[] backupIds) throws IOException {
|
||||
String[] backupIds) throws IOException {
|
||||
List<Path> dirs = new ArrayList<>();
|
||||
|
||||
for (String backupId : backupIds) {
|
||||
Path fileBackupDirPath =
|
||||
new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName));
|
||||
new Path(HBackupFileSystem.getTableBackupDir(backupRoot, backupId, tableName));
|
||||
if (fs.exists(fileBackupDirPath)) {
|
||||
dirs.add(fileBackupDirPath);
|
||||
} else {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -18,7 +18,6 @@
|
|||
package org.apache.hadoop.hbase.backup.mapreduce;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -69,17 +68,15 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
|
|||
}
|
||||
|
||||
/**
|
||||
* A mapper that just writes out cells. This one can be used together with
|
||||
* {@link CellSortReducer}
|
||||
* A mapper that just writes out cells. This one can be used together with {@link CellSortReducer}
|
||||
*/
|
||||
static class HFileCellMapper extends
|
||||
Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
|
||||
static class HFileCellMapper extends Mapper<NullWritable, Cell, ImmutableBytesWritable, Cell> {
|
||||
|
||||
@Override
|
||||
public void map(NullWritable key, Cell value, Context context)
|
||||
throws IOException, InterruptedException {
|
||||
throws IOException, InterruptedException {
|
||||
context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)),
|
||||
new MapReduceExtendedCell(value));
|
||||
new MapReduceExtendedCell(value));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -100,9 +97,8 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
|
|||
String tabName = args[1];
|
||||
conf.setStrings(TABLES_KEY, tabName);
|
||||
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
|
||||
Job job =
|
||||
Job.getInstance(conf,
|
||||
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime()));
|
||||
Job job = Job.getInstance(conf,
|
||||
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime()));
|
||||
job.setJarByClass(MapReduceHFileSplitterJob.class);
|
||||
job.setInputFormatClass(HFileInputFormat.class);
|
||||
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
|
||||
|
@ -116,8 +112,8 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
|
|||
FileOutputFormat.setOutputPath(job, outputDir);
|
||||
job.setMapOutputValueClass(MapReduceExtendedCell.class);
|
||||
try (Connection conn = ConnectionFactory.createConnection(conf);
|
||||
Table table = conn.getTable(tableName);
|
||||
RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
|
||||
Table table = conn.getTable(tableName);
|
||||
RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
|
||||
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
|
||||
}
|
||||
LOG.debug("success configuring load incremental job");
|
||||
|
@ -145,9 +141,9 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
|
|||
System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
|
||||
System.err.println("Other options:");
|
||||
System.err.println(" -D " + JOB_NAME_CONF_KEY
|
||||
+ "=jobName - use the specified mapreduce job name for the HFile splitter");
|
||||
+ "=jobName - use the specified mapreduce job name for the HFile splitter");
|
||||
System.err.println("For performance also consider the following options:\n"
|
||||
+ " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false");
|
||||
+ " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false");
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -34,13 +34,10 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* MapReduce implementation of {@link RestoreJob}
|
||||
*
|
||||
* For backup restore, it runs {@link MapReduceHFileSplitterJob} job and creates
|
||||
* HFiles which are aligned with a region boundaries of a table being
|
||||
* restored.
|
||||
*
|
||||
* The resulting HFiles then are loaded using HBase bulk load tool {@link BulkLoadHFiles}.
|
||||
* MapReduce implementation of {@link RestoreJob} For backup restore, it runs
|
||||
* {@link MapReduceHFileSplitterJob} job and creates HFiles which are aligned with a region
|
||||
* boundaries of a table being restored. The resulting HFiles then are loaded using HBase bulk load
|
||||
* tool {@link BulkLoadHFiles}.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class MapReduceRestoreJob implements RestoreJob {
|
||||
|
@ -54,7 +51,7 @@ public class MapReduceRestoreJob implements RestoreJob {
|
|||
|
||||
@Override
|
||||
public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNames,
|
||||
boolean fullBackupRestore) throws IOException {
|
||||
boolean fullBackupRestore) throws IOException {
|
||||
String bulkOutputConfKey;
|
||||
|
||||
player = new MapReduceHFileSplitterJob();
|
||||
|
@ -65,24 +62,21 @@ public class MapReduceRestoreJob implements RestoreJob {
|
|||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Restore " + (fullBackupRestore ? "full" : "incremental")
|
||||
+ " backup from directory " + dirs + " from hbase tables "
|
||||
+ StringUtils.join(tableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND)
|
||||
+ " to tables "
|
||||
+ StringUtils.join(newTableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND));
|
||||
+ " backup from directory " + dirs + " from hbase tables "
|
||||
+ StringUtils.join(tableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND)
|
||||
+ " to tables "
|
||||
+ StringUtils.join(newTableNames, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND));
|
||||
}
|
||||
|
||||
for (int i = 0; i < tableNames.length; i++) {
|
||||
LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]);
|
||||
|
||||
Path bulkOutputPath =
|
||||
BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]),
|
||||
getConf());
|
||||
Path bulkOutputPath = BackupUtils
|
||||
.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
|
||||
Configuration conf = getConf();
|
||||
conf.set(bulkOutputConfKey, bulkOutputPath.toString());
|
||||
String[] playerArgs = {
|
||||
dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i]
|
||||
.getNameAsString()
|
||||
};
|
||||
String[] playerArgs = { dirs,
|
||||
fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() };
|
||||
|
||||
int result;
|
||||
try {
|
||||
|
@ -97,18 +91,18 @@ public class MapReduceRestoreJob implements RestoreJob {
|
|||
}
|
||||
|
||||
if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) {
|
||||
throw new IOException("Can not restore from backup directory " + dirs +
|
||||
" (check Hadoop and HBase logs). Bulk loader returns null");
|
||||
throw new IOException("Can not restore from backup directory " + dirs
|
||||
+ " (check Hadoop and HBase logs). Bulk loader returns null");
|
||||
}
|
||||
} else {
|
||||
throw new IOException("Can not restore from backup directory " + dirs
|
||||
+ " (check Hadoop/MR and HBase logs). Player return code =" + result);
|
||||
+ " (check Hadoop/MR and HBase logs). Player return code =" + result);
|
||||
}
|
||||
LOG.debug("Restore Job finished:" + result);
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.toString(), e);
|
||||
throw new IOException("Can not restore from backup directory " + dirs
|
||||
+ " (check Hadoop and HBase logs) ", e);
|
||||
throw new IOException(
|
||||
"Can not restore from backup directory " + dirs + " (check Hadoop and HBase logs) ", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/**
|
||||
*
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -43,6 +42,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
|
|||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
|
||||
import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
|
||||
|
||||
|
@ -62,8 +62,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
|
|||
|
||||
@Override
|
||||
public void init(Map<String, Object> params) {
|
||||
MasterServices master = (MasterServices) MapUtils.getObject(params,
|
||||
HMaster.MASTER);
|
||||
MasterServices master = (MasterServices) MapUtils.getObject(params, HMaster.MASTER);
|
||||
if (master != null) {
|
||||
conn = master.getConnection();
|
||||
if (getConf() == null) {
|
||||
|
@ -79,7 +78,6 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
private Map<Address, Long> getServersToOldestBackupMapping(List<BackupInfo> backups)
|
||||
throws IOException {
|
||||
Map<Address, Long> serverAddressToLastBackupMap = new HashMap<>();
|
||||
|
@ -136,8 +134,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
|
|||
Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath()));
|
||||
long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName());
|
||||
|
||||
if (!addressToLastBackupMap.containsKey(walServerAddress)
|
||||
|| addressToLastBackupMap.get(walServerAddress) >= walTimestamp) {
|
||||
if (
|
||||
!addressToLastBackupMap.containsKey(walServerAddress)
|
||||
|| addressToLastBackupMap.get(walServerAddress) >= walTimestamp
|
||||
) {
|
||||
filteredFiles.add(file);
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
|
@ -147,8 +147,8 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
|
|||
}
|
||||
}
|
||||
|
||||
LOG
|
||||
.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), filteredFiles.size());
|
||||
LOG.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files),
|
||||
filteredFiles.size());
|
||||
return filteredFiles;
|
||||
}
|
||||
|
||||
|
@ -156,8 +156,10 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
|
|||
public void setConf(Configuration config) {
|
||||
// If backup is disabled, keep all members null
|
||||
super.setConf(config);
|
||||
if (!config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
|
||||
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) {
|
||||
if (
|
||||
!config.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
|
||||
BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
|
||||
) {
|
||||
LOG.warn("Backup is disabled - allowing all wals to be deleted");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -15,14 +15,12 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.master;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
|
||||
|
@ -61,7 +59,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
|
|||
public static final String BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis";
|
||||
public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis";
|
||||
public static final String BACKUP_POOL_THREAD_NUMBER_KEY =
|
||||
"hbase.backup.logroll.pool.thread.number";
|
||||
"hbase.backup.logroll.pool.thread.number";
|
||||
|
||||
public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500;
|
||||
public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000;
|
||||
|
@ -82,26 +80,24 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
|
|||
|
||||
@Override
|
||||
public void initialize(MasterServices master, MetricsMaster metricsMaster)
|
||||
throws IOException, UnsupportedOperationException {
|
||||
throws IOException, UnsupportedOperationException {
|
||||
this.master = master;
|
||||
this.done = false;
|
||||
|
||||
// setup the default procedure coordinator
|
||||
String name = master.getServerName().toString();
|
||||
|
||||
|
||||
// get the configuration for the coordinator
|
||||
Configuration conf = master.getConfiguration();
|
||||
long wakeFrequency = conf.getInt(BACKUP_WAKE_MILLIS_KEY, BACKUP_WAKE_MILLIS_DEFAULT);
|
||||
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY,BACKUP_TIMEOUT_MILLIS_DEFAULT);
|
||||
int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY,
|
||||
BACKUP_POOL_THREAD_NUMBER_DEFAULT);
|
||||
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
|
||||
int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, BACKUP_POOL_THREAD_NUMBER_DEFAULT);
|
||||
|
||||
// setup the default procedure coordinator
|
||||
ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, opThreads);
|
||||
ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(master);
|
||||
ProcedureCoordinatorRpcs comms =
|
||||
coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name);
|
||||
coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name);
|
||||
this.coordinator = new ProcedureCoordinator(comms, tpool, timeoutMillis, wakeFrequency);
|
||||
|
||||
}
|
||||
|
@ -115,7 +111,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
|
|||
public void execProcedure(ProcedureDescription desc) throws IOException {
|
||||
if (!isBackupEnabled()) {
|
||||
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
|
||||
+ " setting");
|
||||
+ " setting");
|
||||
return;
|
||||
}
|
||||
this.done = false;
|
||||
|
@ -149,12 +145,12 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
|
|||
this.done = true;
|
||||
} catch (InterruptedException e) {
|
||||
ForeignException ee =
|
||||
new ForeignException("Interrupted while waiting for roll log procdure to finish", e);
|
||||
new ForeignException("Interrupted while waiting for roll log procdure to finish", e);
|
||||
monitor.receive(ee);
|
||||
Thread.currentThread().interrupt();
|
||||
} catch (ForeignException e) {
|
||||
ForeignException ee =
|
||||
new ForeignException("Exception while waiting for roll log procdure to finish", e);
|
||||
new ForeignException("Exception while waiting for roll log procdure to finish", e);
|
||||
monitor.receive(ee);
|
||||
}
|
||||
monitor.rethrowException();
|
||||
|
@ -162,7 +158,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
|
|||
|
||||
@Override
|
||||
public void checkPermissions(ProcedureDescription desc, AccessChecker accessChecker, User user)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
// TODO: what permissions checks are needed here?
|
||||
}
|
||||
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -15,13 +15,11 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.regionserver;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Callable;
|
||||
|
||||
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
|
||||
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
|
||||
import org.apache.hadoop.hbase.client.Connection;
|
||||
|
@ -50,10 +48,10 @@ public class LogRollBackupSubprocedure extends Subprocedure {
|
|||
private String backupRoot;
|
||||
|
||||
public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member,
|
||||
ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout,
|
||||
LogRollBackupSubprocedurePool taskManager, byte[] data) {
|
||||
ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout,
|
||||
LogRollBackupSubprocedurePool taskManager, byte[] data) {
|
||||
super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener,
|
||||
wakeFrequency, timeout);
|
||||
wakeFrequency, timeout);
|
||||
LOG.info("Constructing a LogRollBackupSubprocedure.");
|
||||
this.rss = rss;
|
||||
this.taskManager = taskManager;
|
||||
|
@ -91,7 +89,7 @@ public class LogRollBackupSubprocedure extends Subprocedure {
|
|||
}
|
||||
|
||||
LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum
|
||||
+ " highest: " + highest + " on " + rss.getServerName());
|
||||
+ " highest: " + highest + " on " + rss.getServerName());
|
||||
((HRegionServer) rss).getWalRoller().requestRollAll();
|
||||
long start = EnvironmentEdgeManager.currentTime();
|
||||
while (!((HRegionServer) rss).getWalRoller().walRollFinished()) {
|
||||
|
@ -99,20 +97,20 @@ public class LogRollBackupSubprocedure extends Subprocedure {
|
|||
}
|
||||
LOG.debug("log roll took " + (EnvironmentEdgeManager.currentTime() - start));
|
||||
LOG.info("After roll log in backup subprocedure, current log number: " + fsWAL.getFilenum()
|
||||
+ " on " + rss.getServerName());
|
||||
+ " on " + rss.getServerName());
|
||||
|
||||
Connection connection = rss.getConnection();
|
||||
try (final BackupSystemTable table = new BackupSystemTable(connection)) {
|
||||
// sanity check, good for testing
|
||||
HashMap<String, Long> serverTimestampMap =
|
||||
table.readRegionServerLastLogRollResult(backupRoot);
|
||||
table.readRegionServerLastLogRollResult(backupRoot);
|
||||
String host = rss.getServerName().getHostname();
|
||||
int port = rss.getServerName().getPort();
|
||||
String server = host + ":" + port;
|
||||
Long sts = serverTimestampMap.get(host);
|
||||
if (sts != null && sts > highest) {
|
||||
LOG.warn("Won't update server's last roll log result: current=" + sts + " new="
|
||||
+ highest);
|
||||
LOG
|
||||
.warn("Won't update server's last roll log result: current=" + sts + " new=" + highest);
|
||||
return null;
|
||||
}
|
||||
// write the log number to backup system table.
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.regionserver;
|
||||
|
||||
import java.io.Closeable;
|
||||
|
@ -28,19 +27,18 @@ import java.util.concurrent.Future;
|
|||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.Abortable;
|
||||
import org.apache.hadoop.hbase.errorhandling.ForeignException;
|
||||
import org.apache.hadoop.hbase.util.Threads;
|
||||
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
|
||||
/**
|
||||
* Handle running each of the individual tasks for completing a backup procedure on a region
|
||||
* server.
|
||||
* Handle running each of the individual tasks for completing a backup procedure on a region server.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
|
||||
|
@ -58,9 +56,8 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
|
|||
|
||||
public LogRollBackupSubprocedurePool(String name, Configuration conf) {
|
||||
// configure the executor service
|
||||
long keepAlive =
|
||||
conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
|
||||
LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
|
||||
long keepAlive = conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
|
||||
LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
|
||||
int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS);
|
||||
this.name = name;
|
||||
executor =
|
||||
|
@ -94,7 +91,7 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
|
|||
} catch (InterruptedException e) {
|
||||
if (aborted) {
|
||||
throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!",
|
||||
e);
|
||||
e);
|
||||
}
|
||||
Thread.currentThread().interrupt();
|
||||
} catch (ExecutionException e) {
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
@ -15,12 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.regionserver;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
|
||||
import org.apache.hadoop.hbase.backup.impl.BackupManager;
|
||||
|
@ -53,7 +51,7 @@ import org.slf4j.LoggerFactory;
|
|||
@InterfaceAudience.Private
|
||||
public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager {
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class);
|
||||
LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class);
|
||||
|
||||
/** Conf key for number of request threads to start backup on region servers */
|
||||
public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads";
|
||||
|
@ -86,7 +84,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
|
|||
public void start() {
|
||||
if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
|
||||
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
|
||||
+ " setting");
|
||||
+ " setting");
|
||||
return;
|
||||
}
|
||||
this.memberRpcs.start(rss.getServerName().toString(), member);
|
||||
|
@ -122,7 +120,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
|
|||
// don't run a backup if the parent is stop(ping)
|
||||
if (rss.isStopping() || rss.isStopped()) {
|
||||
throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName()
|
||||
+ ", because stopping/stopped!");
|
||||
+ ", because stopping/stopped!");
|
||||
}
|
||||
|
||||
LOG.info("Attempting to run a roll log procedure for backup.");
|
||||
|
@ -130,12 +128,12 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
|
|||
Configuration conf = rss.getConfiguration();
|
||||
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
|
||||
long wakeMillis =
|
||||
conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT);
|
||||
conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT);
|
||||
|
||||
LogRollBackupSubprocedurePool taskManager =
|
||||
new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf);
|
||||
new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf);
|
||||
return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis,
|
||||
taskManager, data);
|
||||
taskManager, data);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -153,12 +151,12 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
|
|||
this.rss = rss;
|
||||
if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
|
||||
LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
|
||||
+ " setting");
|
||||
+ " setting");
|
||||
return;
|
||||
}
|
||||
ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(rss);
|
||||
this.memberRpcs = coordManager
|
||||
.getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
|
||||
.getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
|
||||
|
||||
// read in the backup handler configuration properties
|
||||
Configuration conf = rss.getConfiguration();
|
||||
|
@ -166,7 +164,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
|
|||
int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT);
|
||||
// create the actual cohort member
|
||||
ThreadPoolExecutor pool =
|
||||
ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
|
||||
ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
|
||||
this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder());
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -18,7 +18,6 @@
|
|||
package org.apache.hadoop.hbase.backup.util;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.util;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
|
@ -83,8 +82,8 @@ public final class BackupUtils {
|
|||
* @param rsLogTimestampMap timestamp map
|
||||
* @return the min timestamp of each RS
|
||||
*/
|
||||
public static Map<String, Long> getRSLogTimestampMins(
|
||||
Map<TableName, Map<String, Long>> rsLogTimestampMap) {
|
||||
public static Map<String, Long>
|
||||
getRSLogTimestampMins(Map<TableName, Map<String, Long>> rsLogTimestampMap) {
|
||||
if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
@ -114,13 +113,13 @@ public final class BackupUtils {
|
|||
/**
|
||||
* copy out Table RegionInfo into incremental backup image need to consider move this logic into
|
||||
* HBackupFileSystem
|
||||
* @param conn connection
|
||||
* @param conn connection
|
||||
* @param backupInfo backup info
|
||||
* @param conf configuration
|
||||
* @param conf configuration
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
Path rootDir = CommonFSUtils.getRootDir(conf);
|
||||
FileSystem fs = rootDir.getFileSystem(conf);
|
||||
|
||||
|
@ -140,8 +139,8 @@ public final class BackupUtils {
|
|||
FSTableDescriptors descriptors =
|
||||
new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
|
||||
descriptors.createTableDescriptorForTableDirectory(target, orig, false);
|
||||
LOG.debug("Attempting to copy table info for:" + table + " target: " + target +
|
||||
" descriptor: " + orig);
|
||||
LOG.debug("Attempting to copy table info for:" + table + " target: " + target
|
||||
+ " descriptor: " + orig);
|
||||
LOG.debug("Finished copying tableinfo.");
|
||||
List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
|
||||
// For each region, write the region info to disk
|
||||
|
@ -161,7 +160,7 @@ public final class BackupUtils {
|
|||
* Write the .regioninfo file on-disk.
|
||||
*/
|
||||
public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
|
||||
final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
|
||||
final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
|
||||
final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
|
||||
Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
|
||||
// First check to get the permissions
|
||||
|
@ -219,7 +218,7 @@ public final class BackupUtils {
|
|||
|
||||
/**
|
||||
* Get the total length of files under the given directory recursively.
|
||||
* @param fs The hadoop file system
|
||||
* @param fs The hadoop file system
|
||||
* @param dir The target directory
|
||||
* @return the total length of files
|
||||
* @throws IOException exception
|
||||
|
@ -241,13 +240,13 @@ public final class BackupUtils {
|
|||
|
||||
/**
|
||||
* Get list of all old WAL files (WALs and archive)
|
||||
* @param c configuration
|
||||
* @param c configuration
|
||||
* @param hostTimestampMap {host,timestamp} map
|
||||
* @return list of WAL files
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public static List<String> getWALFilesOlderThan(final Configuration c,
|
||||
final HashMap<String, Long> hostTimestampMap) throws IOException {
|
||||
final HashMap<String, Long> hostTimestampMap) throws IOException {
|
||||
Path walRootDir = CommonFSUtils.getWALRootDir(c);
|
||||
Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
|
||||
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
|
||||
|
@ -292,7 +291,7 @@ public final class BackupUtils {
|
|||
/**
|
||||
* Check whether the backup path exist
|
||||
* @param backupStr backup
|
||||
* @param conf configuration
|
||||
* @param conf configuration
|
||||
* @return Yes if path exists
|
||||
* @throws IOException exception
|
||||
*/
|
||||
|
@ -313,7 +312,7 @@ public final class BackupUtils {
|
|||
/**
|
||||
* Check target path first, confirm it doesn't exist before backup
|
||||
* @param backupRootPath backup destination path
|
||||
* @param conf configuration
|
||||
* @param conf configuration
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
|
||||
|
@ -325,8 +324,7 @@ public final class BackupUtils {
|
|||
String newMsg = null;
|
||||
if (expMsg.contains("No FileSystem for scheme")) {
|
||||
newMsg =
|
||||
"Unsupported filesystem scheme found in the backup target url. Error Message: "
|
||||
+ expMsg;
|
||||
"Unsupported filesystem scheme found in the backup target url. Error Message: " + expMsg;
|
||||
LOG.error(newMsg);
|
||||
throw new IOException(newMsg);
|
||||
} else {
|
||||
|
@ -390,7 +388,7 @@ public final class BackupUtils {
|
|||
}
|
||||
|
||||
public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
|
||||
PathFilter filter) throws IOException {
|
||||
PathFilter filter) throws IOException {
|
||||
RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
|
||||
|
||||
while (it.hasNext()) {
|
||||
|
@ -414,7 +412,7 @@ public final class BackupUtils {
|
|||
/**
|
||||
* Clean up directories which are generated when DistCp copying hlogs
|
||||
* @param backupInfo backup info
|
||||
* @param conf configuration
|
||||
* @param conf configuration
|
||||
* @throws IOException exception
|
||||
*/
|
||||
private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
|
||||
|
@ -449,9 +447,8 @@ public final class BackupUtils {
|
|||
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
|
||||
|
||||
for (TableName table : backupInfo.getTables()) {
|
||||
Path targetDirPath =
|
||||
new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(),
|
||||
table));
|
||||
Path targetDirPath = new Path(
|
||||
getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
|
||||
if (outputFs.delete(targetDirPath, true)) {
|
||||
LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
|
||||
} else {
|
||||
|
@ -468,7 +465,7 @@ public final class BackupUtils {
|
|||
outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
|
||||
} catch (IOException e1) {
|
||||
LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
|
||||
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
|
||||
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -477,15 +474,15 @@ public final class BackupUtils {
|
|||
* which is also where the backup manifest file is. return value look like:
|
||||
* "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
|
||||
* @param backupRootDir backup root directory
|
||||
* @param backupId backup id
|
||||
* @param tableName table name
|
||||
* @param backupId backup id
|
||||
* @param tableName table name
|
||||
* @return backupPath String for the particular table
|
||||
*/
|
||||
public static String getTableBackupDir(String backupRootDir, String backupId,
|
||||
TableName tableName) {
|
||||
TableName tableName) {
|
||||
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
|
||||
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
|
||||
+ Path.SEPARATOR;
|
||||
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
|
||||
+ Path.SEPARATOR;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -510,13 +507,13 @@ public final class BackupUtils {
|
|||
* Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates
|
||||
* differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and
|
||||
* return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException.
|
||||
* @param fs file system
|
||||
* @param dir directory
|
||||
* @param fs file system
|
||||
* @param dir directory
|
||||
* @param filter path filter
|
||||
* @return null if dir is empty or doesn't exist, otherwise FileStatus array
|
||||
*/
|
||||
public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
|
||||
final PathFilter filter) throws IOException {
|
||||
final PathFilter filter) throws IOException {
|
||||
FileStatus[] status = null;
|
||||
try {
|
||||
status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
|
||||
|
@ -535,8 +532,8 @@ public final class BackupUtils {
|
|||
}
|
||||
|
||||
/**
|
||||
* Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the
|
||||
* 'path' component of a Path's URI: e.g. If a Path is
|
||||
* Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path'
|
||||
* component of a Path's URI: e.g. If a Path is
|
||||
* <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
|
||||
* <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
|
||||
* out a Path without qualifying Filesystem instance.
|
||||
|
@ -551,16 +548,16 @@ public final class BackupUtils {
|
|||
* Given the backup root dir and the backup id, return the log file location for an incremental
|
||||
* backup.
|
||||
* @param backupRootDir backup root directory
|
||||
* @param backupId backup id
|
||||
* @param backupId backup id
|
||||
* @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738"
|
||||
*/
|
||||
public static String getLogBackupDir(String backupRootDir, String backupId) {
|
||||
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
|
||||
+ HConstants.HREGION_LOGDIR_NAME;
|
||||
+ HConstants.HREGION_LOGDIR_NAME;
|
||||
}
|
||||
|
||||
private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
// Get all (n) history from backup root destination
|
||||
|
||||
FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
|
||||
|
@ -605,7 +602,7 @@ public final class BackupUtils {
|
|||
}
|
||||
|
||||
public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
|
||||
BackupInfo.Filter... filters) throws IOException {
|
||||
BackupInfo.Filter... filters) throws IOException {
|
||||
List<BackupInfo> infos = getHistory(conf, backupRootPath);
|
||||
List<BackupInfo> ret = new ArrayList<>();
|
||||
for (BackupInfo info : infos) {
|
||||
|
@ -627,7 +624,7 @@ public final class BackupUtils {
|
|||
}
|
||||
|
||||
public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
Path backupPath = new Path(backupRootPath, backupId);
|
||||
|
||||
RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
|
||||
|
@ -646,24 +643,24 @@ public final class BackupUtils {
|
|||
/**
|
||||
* Create restore request.
|
||||
* @param backupRootDir backup root dir
|
||||
* @param backupId backup id
|
||||
* @param check check only
|
||||
* @param fromTables table list from
|
||||
* @param toTables table list to
|
||||
* @param isOverwrite overwrite data
|
||||
* @param backupId backup id
|
||||
* @param check check only
|
||||
* @param fromTables table list from
|
||||
* @param toTables table list to
|
||||
* @param isOverwrite overwrite data
|
||||
* @return request obkect
|
||||
*/
|
||||
public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
|
||||
boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
|
||||
boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
|
||||
RestoreRequest.Builder builder = new RestoreRequest.Builder();
|
||||
RestoreRequest request =
|
||||
builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check)
|
||||
.withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build();
|
||||
builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check)
|
||||
.withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build();
|
||||
return request;
|
||||
}
|
||||
|
||||
public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap,
|
||||
Configuration conf) throws IOException {
|
||||
Configuration conf) throws IOException {
|
||||
boolean isValid = true;
|
||||
|
||||
for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
|
||||
|
@ -678,7 +675,7 @@ public final class BackupUtils {
|
|||
LOG.info("Dependent image(s) from old to new:");
|
||||
for (BackupImage image : imageSet) {
|
||||
String imageDir =
|
||||
HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
|
||||
HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
|
||||
if (!BackupUtils.checkPathExist(imageDir, conf)) {
|
||||
LOG.error("ERROR: backup image does not exist: " + imageDir);
|
||||
isValid = false;
|
||||
|
@ -691,13 +688,12 @@ public final class BackupUtils {
|
|||
}
|
||||
|
||||
public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
FileSystem fs = FileSystem.get(conf);
|
||||
String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
|
||||
fs.getHomeDirectory() + "/hbase-staging");
|
||||
Path path =
|
||||
new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
|
||||
+ EnvironmentEdgeManager.currentTime());
|
||||
String tmp =
|
||||
conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
|
||||
Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
|
||||
+ EnvironmentEdgeManager.currentTime());
|
||||
if (deleteOnExit) {
|
||||
fs.deleteOnExit(path);
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,7 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.backup.util;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
|
@ -75,7 +74,7 @@ public class RestoreTool {
|
|||
private final HashMap<TableName, Path> snapshotMap = new HashMap<>();
|
||||
|
||||
public RestoreTool(Configuration conf, final Path backupRootPath, final String backupId)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
this.conf = conf;
|
||||
this.backupRootPath = backupRootPath;
|
||||
this.backupId = backupId;
|
||||
|
@ -91,8 +90,8 @@ public class RestoreTool {
|
|||
*/
|
||||
Path getTableArchivePath(TableName tableName) throws IOException {
|
||||
Path baseDir =
|
||||
new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
|
||||
HConstants.HFILE_ARCHIVE_DIRECTORY);
|
||||
new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
|
||||
HConstants.HFILE_ARCHIVE_DIRECTORY);
|
||||
Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
|
||||
Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
|
||||
Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString());
|
||||
|
@ -142,16 +141,16 @@ public class RestoreTool {
|
|||
* During incremental backup operation. Call WalPlayer to replay WAL in backup image Currently
|
||||
* tableNames and newTablesNames only contain single table, will be expanded to multiple tables in
|
||||
* the future
|
||||
* @param conn HBase connection
|
||||
* @param conn HBase connection
|
||||
* @param tableBackupPath backup path
|
||||
* @param logDirs : incremental backup folders, which contains WAL
|
||||
* @param tableNames : source tableNames(table names were backuped)
|
||||
* @param newTableNames : target tableNames(table names to be restored to)
|
||||
* @param incrBackupId incremental backup Id
|
||||
* @param logDirs : incremental backup folders, which contains WAL
|
||||
* @param tableNames : source tableNames(table names were backuped)
|
||||
* @param newTableNames : target tableNames(table names to be restored to)
|
||||
* @param incrBackupId incremental backup Id
|
||||
* @throws IOException exception
|
||||
*/
|
||||
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
|
||||
TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
|
||||
TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
|
||||
try (Admin admin = conn.getAdmin()) {
|
||||
if (tableNames.length != newTableNames.length) {
|
||||
throw new IOException("Number of source tables and target tables does not match!");
|
||||
|
@ -163,7 +162,7 @@ public class RestoreTool {
|
|||
for (TableName tableName : newTableNames) {
|
||||
if (!admin.tableExists(tableName)) {
|
||||
throw new IOException("HBase table " + tableName
|
||||
+ " does not exist. Create the table first, e.g. by restoring a full backup.");
|
||||
+ " does not exist. Create the table first, e.g. by restoring a full backup.");
|
||||
}
|
||||
}
|
||||
// adjust table schema
|
||||
|
@ -179,7 +178,7 @@ public class RestoreTool {
|
|||
TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
|
||||
List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
|
||||
List<ColumnFamilyDescriptor> existingFamilies =
|
||||
Arrays.asList(newTableDescriptor.getColumnFamilies());
|
||||
Arrays.asList(newTableDescriptor.getColumnFamilies());
|
||||
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
|
||||
boolean schemaChangeNeeded = false;
|
||||
for (ColumnFamilyDescriptor family : families) {
|
||||
|
@ -206,8 +205,7 @@ public class RestoreTool {
|
|||
}
|
||||
|
||||
public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName,
|
||||
TableName newTableName, boolean truncateIfExists, String lastIncrBackupId)
|
||||
throws IOException {
|
||||
TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
|
||||
createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists,
|
||||
lastIncrBackupId);
|
||||
}
|
||||
|
@ -216,21 +214,20 @@ public class RestoreTool {
|
|||
* Returns value represent path for path to backup table snapshot directory:
|
||||
* "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot"
|
||||
* @param backupRootPath backup root path
|
||||
* @param tableName table name
|
||||
* @param backupId backup Id
|
||||
* @param tableName table name
|
||||
* @param backupId backup Id
|
||||
* @return path for snapshot
|
||||
*/
|
||||
Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) {
|
||||
return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
|
||||
HConstants.SNAPSHOT_DIR_NAME);
|
||||
HConstants.SNAPSHOT_DIR_NAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns value represent path for:
|
||||
* ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/
|
||||
* snapshot_1396650097621_namespace_table"
|
||||
* this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo,
|
||||
* .data.manifest (trunk)
|
||||
* snapshot_1396650097621_namespace_table" this path contains .snapshotinfo, .tabledesc (0.96 and
|
||||
* 0.98) this path contains .snapshotinfo, .data.manifest (trunk)
|
||||
* @param tableName table name
|
||||
* @return path to table info
|
||||
* @throws IOException exception
|
||||
|
@ -241,7 +238,7 @@ public class RestoreTool {
|
|||
|
||||
// can't build the path directly as the timestamp values are different
|
||||
FileStatus[] snapshots = fs.listStatus(tableSnapShotPath,
|
||||
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
|
||||
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
|
||||
for (FileStatus snapshot : snapshots) {
|
||||
tableInfoPath = snapshot.getPath();
|
||||
// SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
|
||||
|
@ -264,28 +261,27 @@ public class RestoreTool {
|
|||
TableDescriptor tableDescriptor = manifest.getTableDescriptor();
|
||||
if (!tableDescriptor.getTableName().equals(tableName)) {
|
||||
LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
|
||||
+ tableInfoPath.toString());
|
||||
LOG.error("tableDescriptor.getNameAsString() = "
|
||||
+ tableDescriptor.getTableName().getNameAsString());
|
||||
+ tableInfoPath.toString());
|
||||
LOG.error(
|
||||
"tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
|
||||
throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
|
||||
+ " under tableInfoPath: " + tableInfoPath.toString());
|
||||
+ " under tableInfoPath: " + tableInfoPath.toString());
|
||||
}
|
||||
return tableDescriptor;
|
||||
}
|
||||
|
||||
private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
|
||||
String lastIncrBackupId) throws IOException {
|
||||
String lastIncrBackupId) throws IOException {
|
||||
if (lastIncrBackupId != null) {
|
||||
String target =
|
||||
BackupUtils.getTableBackupDir(backupRootPath.toString(),
|
||||
lastIncrBackupId, tableName);
|
||||
BackupUtils.getTableBackupDir(backupRootPath.toString(), lastIncrBackupId, tableName);
|
||||
return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target));
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void createAndRestoreTable(Connection conn, TableName tableName, TableName newTableName,
|
||||
Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
|
||||
Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
|
||||
if (newTableName == null) {
|
||||
newTableName = tableName;
|
||||
}
|
||||
|
@ -304,7 +300,7 @@ public class RestoreTool {
|
|||
// check whether snapshot dir already recorded for target table
|
||||
if (snapshotMap.get(tableName) != null) {
|
||||
SnapshotDescription desc =
|
||||
SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
|
||||
SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
|
||||
SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
|
||||
tableDescriptor = manifest.getTableDescriptor();
|
||||
} else {
|
||||
|
@ -315,8 +311,8 @@ public class RestoreTool {
|
|||
LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
|
||||
}
|
||||
} else {
|
||||
throw new IOException("Table snapshot directory: " +
|
||||
tableSnapshotPath + " does not exist.");
|
||||
throw new IOException(
|
||||
"Table snapshot directory: " + tableSnapshotPath + " does not exist.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -326,15 +322,15 @@ public class RestoreTool {
|
|||
// find table descriptor but no archive dir means the table is empty, create table and exit
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("find table descriptor but no archive dir for table " + tableName
|
||||
+ ", will only create table");
|
||||
+ ", will only create table");
|
||||
}
|
||||
tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
|
||||
checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
|
||||
truncateIfExists);
|
||||
return;
|
||||
} else {
|
||||
throw new IllegalStateException("Cannot restore hbase table because directory '"
|
||||
+ " tableArchivePath is null.");
|
||||
throw new IllegalStateException(
|
||||
"Cannot restore hbase table because directory '" + " tableArchivePath is null.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -356,7 +352,8 @@ public class RestoreTool {
|
|||
RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
|
||||
Path[] paths = new Path[regionPathList.size()];
|
||||
regionPathList.toArray(paths);
|
||||
restoreService.run(paths, new TableName[]{tableName}, new TableName[] {newTableName}, true);
|
||||
restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName },
|
||||
true);
|
||||
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.toString(), e);
|
||||
|
@ -430,9 +427,11 @@ public class RestoreTool {
|
|||
// start to parse hfile inside one family dir
|
||||
Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
|
||||
for (Path hfile : hfiles) {
|
||||
if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
|
||||
if (
|
||||
hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
|
||||
|| StoreFileInfo.isReference(hfile.getName())
|
||||
|| HFileLink.isHFileLink(hfile.getName())) {
|
||||
|| HFileLink.isHFileLink(hfile.getName())
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
HFile.Reader reader = HFile.createReader(fs, hfile, conf);
|
||||
|
@ -441,7 +440,7 @@ public class RestoreTool {
|
|||
first = reader.getFirstRowKey().get();
|
||||
last = reader.getLastRowKey().get();
|
||||
LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
|
||||
+ Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
|
||||
+ Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
|
||||
|
||||
// To eventually infer start key-end key boundaries
|
||||
Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
|
||||
|
@ -460,24 +459,24 @@ public class RestoreTool {
|
|||
/**
|
||||
* Prepare the table for bulkload, most codes copied from {@code createTable} method in
|
||||
* {@code BulkLoadHFilesTool}.
|
||||
* @param conn connection
|
||||
* @param tableBackupPath path
|
||||
* @param tableName table name
|
||||
* @param targetTableName target table name
|
||||
* @param regionDirList region directory list
|
||||
* @param htd table descriptor
|
||||
* @param conn connection
|
||||
* @param tableBackupPath path
|
||||
* @param tableName table name
|
||||
* @param targetTableName target table name
|
||||
* @param regionDirList region directory list
|
||||
* @param htd table descriptor
|
||||
* @param truncateIfExists truncates table if exists
|
||||
* @throws IOException exception
|
||||
*/
|
||||
private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
|
||||
TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
|
||||
boolean truncateIfExists) throws IOException {
|
||||
TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
|
||||
boolean truncateIfExists) throws IOException {
|
||||
try (Admin admin = conn.getAdmin()) {
|
||||
boolean createNew = false;
|
||||
if (admin.tableExists(targetTableName)) {
|
||||
if (truncateIfExists) {
|
||||
LOG.info("Truncating exising target table '" + targetTableName
|
||||
+ "', preserving region splits");
|
||||
LOG.info(
|
||||
"Truncating exising target table '" + targetTableName + "', preserving region splits");
|
||||
admin.disableTable(targetTableName);
|
||||
admin.truncateTable(targetTableName, true);
|
||||
} else {
|
||||
|
@ -497,7 +496,7 @@ public class RestoreTool {
|
|||
// create table using table descriptor and region boundaries
|
||||
admin.createTable(htd, keys);
|
||||
}
|
||||
} catch (NamespaceNotFoundException e){
|
||||
} catch (NamespaceNotFoundException e) {
|
||||
LOG.warn("There was no namespace and the same will be created");
|
||||
String namespaceAsString = targetTableName.getNamespaceAsString();
|
||||
LOG.info("Creating target namespace '" + namespaceAsString + "'");
|
||||
|
@ -519,7 +518,7 @@ public class RestoreTool {
|
|||
}
|
||||
if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) {
|
||||
throw new IOException("Time out " + TABLE_AVAILABILITY_WAIT_TIME + "ms expired, table "
|
||||
+ targetTableName + " is still not available");
|
||||
+ targetTableName + " is still not available");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/*
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -111,8 +110,8 @@ public class TestBackupBase {
|
|||
public IncrementalTableBackupClientForTest() {
|
||||
}
|
||||
|
||||
public IncrementalTableBackupClientForTest(Connection conn,
|
||||
String backupId, BackupRequest request) throws IOException {
|
||||
public IncrementalTableBackupClientForTest(Connection conn, String backupId,
|
||||
BackupRequest request) throws IOException {
|
||||
super(conn, backupId, request);
|
||||
}
|
||||
|
||||
|
@ -127,13 +126,13 @@ public class TestBackupBase {
|
|||
failStageIf(Stage.stage_1);
|
||||
backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
|
||||
LOG.debug("For incremental backup, current table set is "
|
||||
+ backupManager.getIncrementalBackupTableSet());
|
||||
+ backupManager.getIncrementalBackupTableSet());
|
||||
newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
|
||||
// copy out the table and region info files for each table
|
||||
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
|
||||
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
|
||||
convertWALsToHFiles();
|
||||
incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
|
||||
incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
|
||||
backupInfo.getBackupRootDir());
|
||||
failStageIf(Stage.stage_2);
|
||||
|
||||
|
@ -142,7 +141,7 @@ public class TestBackupBase {
|
|||
// After this checkpoint, even if entering cancel process, will let the backup finished
|
||||
// Set the previousTimestampMap which is before this current log roll to the manifest.
|
||||
Map<TableName, Map<String, Long>> previousTimestampMap =
|
||||
backupManager.readLogTimestampMap();
|
||||
backupManager.readLogTimestampMap();
|
||||
backupInfo.setIncrTimestampMap(previousTimestampMap);
|
||||
|
||||
// The table list in backupInfo is good for both full backup and incremental backup.
|
||||
|
@ -151,10 +150,10 @@ public class TestBackupBase {
|
|||
failStageIf(Stage.stage_3);
|
||||
|
||||
Map<TableName, Map<String, Long>> newTableSetTimestampMap =
|
||||
backupManager.readLogTimestampMap();
|
||||
backupManager.readLogTimestampMap();
|
||||
|
||||
Long newStartCode =
|
||||
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
|
||||
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
|
||||
backupManager.writeBackupStartCode(newStartCode);
|
||||
|
||||
handleBulkLoad(backupInfo.getTableNames());
|
||||
|
@ -176,7 +175,7 @@ public class TestBackupBase {
|
|||
}
|
||||
|
||||
public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request)
|
||||
throws IOException {
|
||||
throws IOException {
|
||||
super(conn, backupId, request);
|
||||
}
|
||||
|
||||
|
@ -215,9 +214,8 @@ public class TestBackupBase {
|
|||
// SNAPSHOT_TABLES:
|
||||
backupInfo.setPhase(BackupPhase.SNAPSHOT);
|
||||
for (TableName tableName : tableList) {
|
||||
String snapshotName =
|
||||
"snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
|
||||
+ tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
|
||||
String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
|
||||
+ "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
|
||||
|
||||
snapshotTable(admin, tableName, snapshotName);
|
||||
backupInfo.setSnapshotName(tableName, snapshotName);
|
||||
|
@@ -239,11 +237,10 @@ public class TestBackupBase {
      backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);

      Map<TableName, Map<String, Long>> newTableSetTimestampMap =
        backupManager.readLogTimestampMap();
        backupManager.readLogTimestampMap();

      Long newStartCode =
        BackupUtils.getMinValue(BackupUtils
          .getRSLogTimestampMins(newTableSetTimestampMap));
        BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
      backupManager.writeBackupStartCode(newStartCode);
      failStageIf(Stage.stage_4);
      // backup complete

@@ -251,7 +248,7 @@ public class TestBackupBase {

    } catch (Exception e) {

      if(autoRestoreOnFailure) {
      if (autoRestoreOnFailure) {
        failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
          BackupType.FULL, conf);
      }

@@ -261,13 +258,13 @@ public class TestBackupBase {
  }

  public static void setUpHelper() throws Exception {
    BACKUP_ROOT_DIR = Path.SEPARATOR +"backupUT";
    BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
    BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";

    if (secure) {
      // set the always on security provider
      UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
        HadoopSecurityEnabledUserProviderForTesting.class);
        HadoopSecurityEnabledUserProviderForTesting.class);
      // setup configuration
      SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
    }

@@ -299,23 +296,21 @@ public class TestBackupBase {

    TEST_UTIL.startMiniMapReduceCluster();
    BACKUP_ROOT_DIR =
      new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")),
        BACKUP_ROOT_DIR).toString();
      new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR)
        .toString();
    LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
    if (useSecondCluster) {
      BACKUP_REMOTE_ROOT_DIR =
        new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS"))
          + BACKUP_REMOTE_ROOT_DIR).toString();
      BACKUP_REMOTE_ROOT_DIR = new Path(
        new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) + BACKUP_REMOTE_ROOT_DIR)
          .toString();
      LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
    }
    createTables();
    populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
  }


  /**
   * Setup Cluster with appropriate configurations before running tests.
   *
   * @throws Exception if starting the mini cluster or setting up the tables fails
   */
  @BeforeClass

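The BACKUP_ROOT_DIR rewrite above is a pure re-wrap; the value is still the mini cluster's fs.defaultFS prefixed onto the /backupUT path. A small standalone illustration of that composition, using only Hadoop's Configuration and Path (the fs.defaultFS value here is an example; the tests read it from the mini cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class BackupRootDirExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // example value for illustration
    String backupRootDir = Path.SEPARATOR + "backupUT";
    // Same composition as setUp() above: qualify the backup dir with the cluster's filesystem.
    backupRootDir = new Path(new Path(conf.get("fs.defaultFS")), backupRootDir).toString();
    System.out.println(backupRootDir); // hdfs://localhost:8020/backupUT
  }
}
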
@@ -327,7 +322,6 @@ public class TestBackupBase {
    setUpHelper();
  }


  private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
    Iterator<Entry<String, String>> it = masterConf.iterator();
    while (it.hasNext()) {

@@ -341,7 +335,7 @@ public class TestBackupBase {
   */
  @AfterClass
  public static void tearDown() throws Exception {
    try{
    try {
      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
    } catch (Exception e) {
    }

@@ -356,7 +350,7 @@ public class TestBackupBase {
  }

  Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
    throws IOException {
    throws IOException {
    Table t = conn.getTable(table);
    Put p1;
    for (int i = 0; i < numRows; i++) {

@@ -367,17 +361,16 @@ public class TestBackupBase {
    return t;
  }

  protected BackupRequest createBackupRequest(BackupType type,
    List<TableName> tables, String path) {
  protected BackupRequest createBackupRequest(BackupType type, List<TableName> tables,
    String path) {
    BackupRequest.Builder builder = new BackupRequest.Builder();
    BackupRequest request = builder.withBackupType(type)
      .withTableList(tables)
      .withTargetRootDir(path).build();
    BackupRequest request =
      builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build();
    return request;
  }

  protected String backupTables(BackupType type, List<TableName> tables, String path)
    throws IOException {
    throws IOException {
    Connection conn = null;
    BackupAdmin badmin = null;
    String backupId;

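The two helpers above wrap the builder-style backup API used throughout these tests. A rough usage sketch: the Builder calls are exactly those shown in the diff, while BackupAdminImpl and backupTables(...) are assumptions based on the hbase-backup admin API rather than part of this patch:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;

public class BackupRequestSketch {
  // Build a full-backup request for the given tables and submit it, returning the backup id.
  // BackupAdminImpl and backupTables(...) are assumed from the hbase-backup admin API.
  static String runFullBackup(Connection conn, List<TableName> tables, String backupRootDir)
    throws IOException {
    BackupRequest request = new BackupRequest.Builder().withBackupType(BackupType.FULL)
      .withTableList(tables).withTargetRootDir(backupRootDir).build();
    try (BackupAdmin admin = new BackupAdminImpl(conn)) {
      return admin.backupTables(request);
    }
  }
}
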
@@ -1,4 +1,4 @@
/**
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information

@@ -35,13 +35,12 @@ public class TestBackupBoundaryTests extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestBackupBoundaryTests.class);
    HBaseClassTestRule.forClass(TestBackupBoundaryTests.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestBackupBoundaryTests.class);

  /**
   * Verify that full backup is created on a single empty table correctly.
   *
   * @throws Exception if doing the full backup fails
   */
  @Test

@@ -53,7 +52,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {

  /**
   * Verify that full backup is created on multiple empty tables correctly.
   *
   * @throws Exception if doing the full backup fails
   */
  @Test

@@ -66,7 +64,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {

  /**
   * Verify that full backup fails on a single table that does not exist.
   *
   * @throws Exception if doing the full backup fails
   */
  @Test(expected = IOException.class)

@@ -78,7 +75,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {

  /**
   * Verify that full backup fails on multiple tables that do not exist.
   *
   * @throws Exception if doing the full backup fails
   */
  @Test(expected = IOException.class)

@@ -90,7 +86,6 @@ public class TestBackupBoundaryTests extends TestBackupBase {

  /**
   * Verify that full backup fails on tableset containing real and fake tables.
   *
   * @throws Exception if doing the full backup fails
   */
  @Test(expected = IOException.class)

Some files were not shown because too many files have changed in this diff.