parent 0edecbf9e0
commit 9c8c9e7fbf

CHANGES.txt
@@ -7,7 +7,7 @@ Release 0.92.1 - Unreleased
  BUG FIXES
   HBASE-5176 AssignmentManager#getRegion: logging nit adds a redundant '+' (Karthik K)
   HBASE-5237 Addendum for HBASE-5160 and HBASE-4397 (Ram)
   HBASE-5235 HLogSplitter writer thread's streams not getting closed when any
              of the writer threads has exceptions. (Ram)
   HBASE-5243 LogSyncerThread not getting shutdown waiting for the interrupted flag (Ram)
   HBASE-5255 Use singletons for OperationStatus to save memory (Benoit)
@@ -144,7 +144,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-3897 Docs (notsoquick guide) suggest invalid XML (Philip Zeyliger)
   HBASE-3898 TestSplitTransactionOnCluster broke in TRUNK
   HBASE-3826 Minor compaction needs to check if still over
              compactionThreshold after compacting (Nicolas Spiegelberg)
   HBASE-3912 [Stargate] Columns not handle by Scan
   HBASE-3903 A successful write to client write-buffer may be lost or not
              visible (Doug Meil)
@@ -198,7 +198,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-4112 Creating table may throw NullPointerException (Jinchao via Ted Yu)
   HBASE-4093 When verifyAndAssignRoot throws exception, the deadServers state
              cannot be changed (fulin wang via Ted Yu)
   HBASE-4118 method regionserver.MemStore#updateColumnValue: the check for
              qualifier and family is missing (N Keywal via Ted Yu)
   HBASE-4127 Don't modify table's name away in HBaseAdmin
   HBASE-4105 Stargate does not support Content-Type: application/json and
@@ -300,7 +300,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-4395 EnableTableHandler races with itself
   HBASE-4414 Region splits by size not being triggered
   HBASE-4322 HBASE-4322 [hbck] Update checkIntegrity/checkRegionChain
              to present more accurate region split problem
              (Jon Hseih)
   HBASE-4417 HBaseAdmin.checkHBaseAvailable() doesn't close ZooKeeper connections
              (Stefan Seelmann)
@@ -483,7 +483,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-5100 Rollback of split could cause closed region to be opened again (Chunhui)
   HBASE-4397 -ROOT-, .META. tables stay offline for too long in recovery phase after all RSs
              are shutdown at the same time (Ming Ma)
   HBASE-5094 The META can hold an entry for a region with a different server name from the one
              actually in the AssignmentManager thus making the region inaccessible. (Ram)
   HBASE-5081 Distributed log splitting deleteNode races against splitLog retry (Prakash)
   HBASE-4357 Region stayed in transition - in closing state (Ming Ma)
@@ -517,7 +517,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-5105 TestImportTsv failed with hadoop 0.22 (Ming Ma)

  IMPROVEMENTS
   HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack)
   HBASE-3292 Expose block cache hit/miss/evict counts into region server
              metrics
   HBASE-2936 Differentiate between daemon & restart sleep periods
@@ -538,7 +538,7 @@ Release 0.92.0 - 01/23/2012
              (rpc version 43)
   HBASE-3563 [site] Add one-page-only version of hbase doc
   HBASE-3564 DemoClient.pl - a demo client in Perl
   HBASE-3560 the hbase-default entry of "hbase.defaults.for.version"
              causes tests not to run via not-maven
   HBASE-3513 upgrade thrift to 0.5.0 and use mvn version
   HBASE-3533 Allow HBASE_LIBRARY_PATH env var to specify extra locations
@@ -601,7 +601,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-3765 metrics.xml - small format change and adding nav to hbase
              book metrics section (Doug Meil)
   HBASE-3759 Eliminate use of ThreadLocals for CoprocessorEnvironment
              bypass() and complete()
   HBASE-3701 revisit ArrayList creation (Ted Yu via Stack)
   HBASE-3753 Book.xml - architecture, adding more Store info (Doug Meil)
   HBASE-3784 book.xml - adding small subsection in architecture/client on
@@ -738,7 +738,7 @@ Release 0.92.0 - 01/23/2012
   HBASE-4425 Provide access to RpcServer instance from RegionServerServices
   HBASE-4411 When copying tables/CFs, allow CF names to be changed
              (David Revell)
   HBASE-4424 Provide coprocessors access to createTable() via
              MasterServices
   HBASE-4432 Enable/Disable off heap cache with config (Li Pi)
   HBASE-4434 seek optimization: don't do eager HFile Scanner
@@ -1098,7 +1098,7 @@ Release 0.90.3 - May 19th, 2011
   HBASE-3846 Set RIT timeout higher

Release 0.90.2 - 20110408

  BUG FIXES
   HBASE-3545 Possible liveness issue with MasterServerAddress in
              HRegionServer getMaster (Greg Bowyer via Stack)
@@ -1151,7 +1151,7 @@ Release 0.90.2 - 20110408
   HBASE-3654 Weird blocking between getOnlineRegion and createRegionLoad
              (Subbu M Iyer via Stack)
   HBASE-3666 TestScannerTimeout fails occasionally
   HBASE-3497 TableMapReduceUtil.initTableReducerJob broken due to setConf
              method in TableOutputFormat
   HBASE-3686 ClientScanner skips too many rows on recovery if using scanner
              caching (Sean Sechrist via Stack)
@@ -1159,7 +1159,7 @@ Release 0.90.2 - 20110408
  IMPROVEMENTS
   HBASE-3542 MultiGet methods in Thrift
   HBASE-3586 Improve the selection of regions to balance (Ted Yu via Andrew
              Purtell)
   HBASE-3603 Remove -XX:+HeapDumpOnOutOfMemoryError autodump of heap option
              on OOME
   HBASE-3285 Hlog recovery takes too much time
@@ -1186,19 +1186,19 @@ Release 0.90.1 - February 9th, 2011
   HBASE-3455 Add memstore-local allocation buffers to combat heap
              fragmentation in the region server. Experimental / disabled
              by default in 0.90.1

  BUG FIXES
   HBASE-3445 Master crashes on data that was moved from different host
   HBASE-3449 Server shutdown handlers deadlocked waiting for META
   HBASE-3456 Fix hardcoding of 20 second socket timeout down in HBaseClient
   HBASE-3476 HFile -m option need not scan key values
              (Prakash Khemani via Lars George)
   HBASE-3481 max seq id in flushed file can be larger than its correct value
              causing data loss during recovery
   HBASE-3493 HMaster sometimes hangs during initialization due to missing
              notify call (Bruno Dumon via Stack)
   HBASE-3483 Memstore lower limit should trigger asynchronous flushes
   HBASE-3494 checkAndPut implementation doesnt verify row param and writable
              row are the same
   HBASE-3416 For intra-row scanning, the update readers notification resets
              the query matcher and can lead to incorrect behavior
@@ -1288,7 +1288,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-1830 HbaseObjectWritable methods should allow null HBCs
              for when Writable is not Configurable (Stack via jgray)
   HBASE-1847 Delete latest of a null qualifier when non-null qualifiers
              exist throws a RuntimeException
   HBASE-1850 src/examples/mapred do not compile after HBASE-1822
   HBASE-1853 Each time around the regionserver core loop, we clear the
              messages to pass master, even if we failed to deliver them
@@ -1343,9 +1343,9 @@ Release 0.90.0 - January 19th, 2011
   HBASE-1954 Transactional scans do not see newest put (Clint Morgan via
              Stack)
   HBASE-1919 code: HRS.delete seems to ignore exceptions it shouldnt
   HBASE-1951 Stack overflow when calling HTable.checkAndPut()
              when deleting a lot of values
   HBASE-1781 Weird behavior of WildcardColumnTracker.checkColumn(),
              looks like recursive loop
   HBASE-1949 KeyValue expiration by Time-to-Live during major compaction is
              broken (Gary Helmling via Stack)
@@ -1377,7 +1377,7 @@ Release 0.90.0 - January 19th, 2011
              'descendingIterator' (Ching-Shen Chen via Stack)
   HBASE-2033 Shell scan 'limit' is off by one
   HBASE-2040 Fixes to group commit
   HBASE-2047 Example command in the "Getting Started"
              documentation doesn't work (Benoit Sigoure via JD)
   HBASE-2048 Small inconsistency in the "Example API Usage"
              (Benoit Sigoure via JD)
@@ -1385,14 +1385,14 @@ Release 0.90.0 - January 19th, 2011
   HBASE-1960 Master should wait for DFS to come up when creating
              hbase.version
   HBASE-2054 memstore size 0 is >= than blocking -2.0g size
   HBASE-2064 Cannot disable a table if at the same the Master is moving
              its regions around
   HBASE-2065 Cannot disable a table if any of its region is opening
              at the same time
   HBASE-2026 NPE in StoreScanner on compaction
   HBASE-2072 fs.automatic.close isn't passed to FileSystem
   HBASE-2075 Master requires HDFS superuser privileges due to waitOnSafeMode
   HBASE-2077 NullPointerException with an open scanner that expired causing
              an immediate region server shutdown (Sam Pullara via JD)
   HBASE-2078 Add JMX settings as commented out lines to hbase-env.sh
              (Lars George via JD)
@@ -1459,11 +1459,11 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2258 The WhileMatchFilter doesn't delegate the call to filterRow()
   HBASE-2259 StackOverflow in ExplicitColumnTracker when row has many columns
   HBASE-2268 [stargate] Failed tests and DEBUG output is dumped to console
              since move to Mavenized build
   HBASE-2276 Hbase Shell hcd() method is broken by the replication scope
              parameter (Alexey Kovyrin via Lars George)
   HBASE-2244 META gets inconsistent in a number of crash scenarios
   HBASE-2284 fsWriteLatency metric may be incorrectly reported
              (Kannan Muthukkaruppan via Stack)
   HBASE-2063 For hfileoutputformat, on timeout/failure/kill clean up
              half-written hfile (Ruslan Salyakhov via Stack)
@@ -1478,7 +1478,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2308 Fix the bin/rename_table.rb script, make it work again
   HBASE-2307 hbase-2295 changed hregion size, testheapsize broke... fix it
   HBASE-2269 PerformanceEvaluation "--nomapred" may assign duplicate random
              seed over multiple testing threads (Tatsuya Kawano via Stack)
   HBASE-2287 TypeError in shell (Alexey Kovyrin via Stack)
   HBASE-2023 Client sync block can cause 1 thread of a multi-threaded client
              to block all others (Karthik Ranganathan via Stack)
@@ -1548,10 +1548,10 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2544 Forward port branch 0.20 WAL to TRUNK
   HBASE-2546 Specify default filesystem in both the new and old way (needed
              if we are to run on 0.20 and 0.21 hadoop)
   HBASE-1895 HConstants.MAX_ROW_LENGTH is incorrectly 64k, should be 32k
   HBASE-1968 Give clients access to the write buffer
   HBASE-2028 Add HTable.incrementColumnValue support to shell
              (Lars George via Andrew Purtell)
   HBASE-2138 unknown metrics type
   HBASE-2551 Forward port fixes that are in branch but not in trunk (part of
              the merge of old 0.20 into TRUNK task) -- part 1.
@@ -1560,7 +1560,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2344 InfoServer and hence HBase Master doesn't fully start if you
              have HADOOP-6151 patch (Kannan Muthukkaruppan via Stack)
   HBASE-2382 Don't rely on fs.getDefaultReplication() to roll HLogs
              (Nicolas Spiegelberg via Stack)
   HBASE-2415 Disable META splitting in 0.20 (Todd Lipcon via Stack)
   HBASE-2421 Put hangs for 10 retries on failed region servers
   HBASE-2442 Log lease recovery catches IOException too widely
@@ -1617,7 +1617,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2703 ui not working in distributed context
   HBASE-2710 Shell should use default terminal width when autodetection fails
              (Kannan Muthukkaruppan via Todd Lipcon)
   HBASE-2712 Cached region location that went stale won't recover if
              asking for first row
   HBASE-2732 TestZooKeeper was broken, HBASE-2691 showed it
   HBASE-2670 Provide atomicity for readers even when new insert has
@@ -1653,7 +1653,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2772 Scan doesn't recover from region server failure
   HBASE-2775 Update of hadoop jar in HBASE-2771 broke TestMultiClusters
   HBASE-2774 Spin in ReadWriteConsistencyControl eating CPU (load > 40) and
              no progress running YCSB on clean cluster startup
   HBASE-2785 TestScannerTimeout.test2772 is flaky
   HBASE-2787 PE is confused about flushCommits
   HBASE-2707 Can't recover from a dead ROOT server if any exceptions happens
@@ -1665,18 +1665,18 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2797 Another NPE in ReadWriteConsistencyControl
   HBASE-2831 Fix '$bin' path duplication in setup scripts
              (Nicolas Spiegelberg via Stack)
   HBASE-2781 ZKW.createUnassignedRegion doesn't make sure existing znode is
              in the right state (Karthik Ranganathan via JD)
   HBASE-2727 Splits writing one file only is untenable; need dir of recovered
              edits ordered by sequenceid
   HBASE-2843 Readd bloomfilter test over zealously removed by HBASE-2625
   HBASE-2846 Make rest server be same as thrift and avro servers
   HBASE-1511 Pseudo distributed mode in LocalHBaseCluster
              (Nicolas Spiegelberg via Stack)
   HBASE-2851 Remove testDynamicBloom() unit test
              (Nicolas Spiegelberg via Stack)
   HBASE-2853 TestLoadIncrementalHFiles fails on TRUNK
   HBASE-2854 broken tests on trunk
   HBASE-2859 Cleanup deprecated stuff in TestHLog (Alex Newman via Stack)
   HBASE-2858 TestReplication.queueFailover fails half the time
   HBASE-2863 HBASE-2553 removed an important edge case
@@ -1789,7 +1789,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-3064 Long sleeping in HConnectionManager after thread is interrupted
              (Bruno Dumon via Stack)
   HBASE-2753 Remove sorted() methods from Result now that Gets are Scans
   HBASE-3059 TestReadWriteConsistencyControl occasionally hangs (Hairong
              via Ryan)
   HBASE-2906 [rest/stargate] URI decoding in RowResource
   HBASE-3008 Memstore.updateColumnValue passes wrong flag to heapSizeChange
@@ -1820,7 +1820,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-3121 [rest] Do not perform cache control when returning results
   HBASE-2669 HCM.shutdownHook causes data loss with
              hbase.client.write.buffer != 0
   HBASE-2985 HRegionServer.multi() no longer calls HRegion.put(List) when
              possible
   HBASE-3031 CopyTable MR job named "Copy Table" in Driver
   HBASE-2658 REST (stargate) TableRegionModel Regions need to be updated to
@@ -1891,7 +1891,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-3199 large response handling: some fixups and cleanups
   HBASE-3212 More testing of enable/disable uncovered base condition not in
              place; i.e. that only one enable/disable runs at a time
   HBASE-2898 MultiPut makes proper error handling impossible and leads to
              corrupted data
   HBASE-3213 If do abort of backup master will get NPE instead of graceful
              abort
@@ -1904,7 +1904,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-3224 NPE in KeyValue$KVComparator.compare when compacting
   HBASE-3233 Fix Long Running Stats
   HBASE-3232 Fix KeyOnlyFilter + Add Value Length (Nicolas via Ryan)
   HBASE-3235 Intermittent incrementColumnValue failure in TestHRegion
              (Gary via Ryan)
   HBASE-3241 check to see if we exceeded hbase.regionserver.maxlogs limit is
              incorrect (Kannan Muthukkaruppan via JD)
@@ -1955,7 +1955,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-3352 enabling a non-existent table from shell prints no error
   HBASE-3353 table.jsp doesn't handle entries in META without server info
   HBASE-3351 ReplicationZookeeper goes to ZK every time a znode is modified
   HBASE-3326 Replication state's znode should be created else it
              defaults to false
   HBASE-3355 Stopping a stopped cluster leaks an HMaster
   HBASE-3356 Add more checks in replication if RS is stopped
@@ -2060,8 +2060,8 @@ Release 0.90.0 - January 19th, 2011
   HBASE-1942 Update hadoop jars in trunk; update to r831142
   HBASE-1943 Remove AgileJSON; unused
   HBASE-1944 Add a "deferred log flush" attribute to HTD
   HBASE-1945 Remove META and ROOT memcache size bandaid
   HBASE-1947 If HBase starts/stops often in less than 24 hours,
              you end up with lots of store files
   HBASE-1829 Make use of start/stop row in TableInputFormat
              (Lars George via Stack)
@@ -2109,7 +2109,7 @@ Release 0.90.0 - January 19th, 2011
              Stack)
   HBASE-2076 Many javadoc warnings
   HBASE-2068 MetricsRate is missing "registry" parameter (Lars George via JD)
   HBASE-2025 0.20.2 accessed from older client throws
              UndeclaredThrowableException; frustrates rolling upgrade
   HBASE-2081 Set the retries higher in shell since client pause is lower
   HBASE-1956 Export HDFS read and write latency as a metric
@@ -2131,7 +2131,7 @@ Release 0.90.0 - January 19th, 2011
              ./bin/start-hbase.sh in a checkout
   HBASE-2136 Forward-port the old mapred package
   HBASE-2133 Increase default number of client handlers
   HBASE-2109 status 'simple' should show total requests per second, also
              the requests/sec is wrong as is
   HBASE-2151 Remove onelab and include generated thrift classes in javadoc
              (Lars Francke via Stack)
@@ -2170,9 +2170,9 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2250 typo in the maven pom
   HBASE-2254 Improvements to the Maven POMs (Lars Francke via Stack)
   HBASE-2262 ZKW.ensureExists should check for existence
   HBASE-2264 Adjust the contrib apps to the Maven project layout
              (Lars Francke via Lars George)
   HBASE-2245 Unnecessary call to syncWal(region); in HRegionServer
              (Benoit Sigoure via JD)
   HBASE-2246 Add a getConfiguration method to HTableInterface
              (Benoit Sigoure via JD)
@@ -2180,10 +2180,10 @@ Release 0.90.0 - January 19th, 2011
              development (Alexey Kovyrin via Stack)
   HBASE-2267 More improvements to the Maven build (Lars Francke via Stack)
   HBASE-2174 Stop from resolving HRegionServer addresses to names using DNS
              on every heartbeat (Karthik Ranganathan via Stack)
   HBASE-2302 Optimize M-R by bulk excluding regions - less InputSplit-s to
              avoid traffic on region servers when performing M-R on a subset
              of the table (Kay Kay via Stack)
   HBASE-2309 Add apache releases to pom (list of ) repositories
              (Kay Kay via Stack)
   HBASE-2279 Hbase Shell does not have any tests (Alexey Kovyrin via Stack)
@@ -2209,15 +2209,15 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2374 TableInputFormat - Configurable parameter to add column families
              (Kay Kay via Stack)
   HBASE-2388 Give a very explicit message when we figure a big GC pause
   HBASE-2270 Improve how we handle recursive calls in ExplicitColumnTracker
              and WildcardColumnTracker
   HBASE-2402 [stargate] set maxVersions on gets
   HBASE-2087 The wait on compaction because "Too many store files"
              holds up all flushing
   HBASE-2252 Mapping a very big table kills region servers
   HBASE-2412 [stargate] PerformanceEvaluation
   HBASE-2419 Remove from RS logs the fat NotServingRegionException stack
   HBASE-2286 [Transactional Contrib] Correctly handle or avoid cases where
              writes occur in same millisecond (Clint Morgan via J-D)
   HBASE-2360 Make sure we have all the hadoop fixes in our our copy of its rpc
              (Todd Lipcon via Stack)
@@ -2251,7 +2251,7 @@ Release 0.90.0 - January 19th, 2011
              (Todd Lipcon via Stack)
   HBASE-2547 [mvn] assembly:assembly does not include hbase-X.X.X-test.jar
              (Paul Smith via Stack)
   HBASE-2037 The core elements of HBASE-2037: refactoring flushing, and adding
              configurability in which HRegion subclass is instantiated
   HBASE-2248 Provide new non-copy mechanism to assure atomic reads in get and scan
   HBASE-2523 Add check for licenses before rolling an RC, add to
@@ -2264,7 +2264,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2520 Cleanup arrays vs Lists of scanners (Todd Lipcon via Stack)
   HBASE-2551 Forward port fixes that are in branch but not in trunk (part
              of the merge of old 0.20 into TRUNK task)
   HBASE-2466 Improving filter API to allow for modification of keyvalue list
              by filter (Juhani Connolly via Ryan)
   HBASE-2566 Remove 'lib' dir; it only has libthrift and that is being
              pulled from http://people.apache.org/~rawson/repo/....
@@ -2289,13 +2289,13 @@ Release 0.90.0 - January 19th, 2011
              failing hudson on occasion)
   HBASE-2651 Allow alternate column separators to be specified for ImportTsv
   HBASE-2661 Add test case for row atomicity guarantee
   HBASE-2578 Add ability for tests to override server-side timestamp
              setting (currentTimeMillis) (Daniel Ploeg via Ryan Rawson)
   HBASE-2558 Our javadoc overview -- "Getting Started", requirements, etc. --
              is not carried across by mvn javadoc:javadoc target
   HBASE-2618 Don't inherit from HConstants (Benoit Sigoure via Stack)
   HBASE-2208 TableServers # processBatchOfRows - converts from List to [ ]
              - Expensive copy
   HBASE-2694 Move RS to Master region open/close messaging into ZooKeeper
   HBASE-2716 Make HBase's maven artifacts configurable with -D
              (Alex Newman via Stack)
@@ -2308,7 +2308,7 @@ Release 0.90.0 - January 19th, 2011
              message
   HBASE-2724 Update to new release of Guava library
   HBASE-2735 Make HBASE-2694 replication-friendly
   HBASE-2683 Make it obvious in the documentation that ZooKeeper needs
              permanent storage
   HBASE-2764 Force all Chore tasks to have a thread name
   HBASE-2762 Add warning to master if running without append enabled
@@ -2319,7 +2319,7 @@ Release 0.90.0 - January 19th, 2011
              (Nicolas Spiegelberg via JD)
   HBASE-2786 TestHLog.testSplit hangs (Nicolas Spiegelberg via JD)
   HBASE-2790 Purge apache-forrest from TRUNK
   HBASE-2793 Add ability to extract a specified list of versions of a column
              in a single roundtrip (Kannan via Ryan)
   HBASE-2828 HTable unnecessarily coupled with HMaster
              (Nicolas Spiegelberg via Stack)
@@ -2331,7 +2331,7 @@ Release 0.90.0 - January 19th, 2011
              next column (Pranav via jgray)
   HBASE-2835 Update hadoop jar to head of branch-0.20-append to catch three
              added patches
   HBASE-2840 Remove the final remnants of the old Get code - the query matchers
              and other helper classes
   HBASE-2845 Small edit of shell main help page cutting down some on white
              space and text
@@ -2360,9 +2360,9 @@ Release 0.90.0 - January 19th, 2011
   HBASE-1517 Implement inexpensive seek operations in HFile (Pranav via Ryan)
   HBASE-2903 ColumnPrefix filtering (Pranav via Ryan)
   HBASE-2904 Smart seeking using filters (Pranav via Ryan)
   HBASE-2922 HLog preparation and cleanup are done under the updateLock,
              major slowdown
   HBASE-1845 MultiGet, MultiDelete, and MultiPut - batched to the
              appropriate region servers (Marc Limotte via Ryan)
   HBASE-2867 Have master show its address using hostname rather than IP
   HBASE-2696 ZooKeeper cleanup and refactor
@@ -2375,7 +2375,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-2857 HBaseAdmin.tableExists() should not require a full meta scan
   HBASE-2962 Add missing methods to HTableInterface (and HTable)
              (Lars Francke via Stack)
   HBASE-2942 Custom filters should not require registration in
              HBaseObjectWritable (Gary Helmling via Andrew Purtell)
   HBASE-2976 Running HFile tool passing fully-qualified filename I get
              'IllegalArgumentException: Wrong FS'
@@ -2417,7 +2417,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-3133 Only log compaction requests when a request is actually added
              to the queue
   HBASE-3132 Print TimestampRange and BloomFilters in HFile pretty print
   HBASE-2514 RegionServer should refuse to be assigned a region that use
              LZO when LZO isn't available
   HBASE-3082 For ICV gets, first look in MemStore before reading StoreFiles
              (prakash via jgray)
@@ -2548,7 +2548,7 @@ Release 0.90.0 - January 19th, 2011
   HBASE-410 [testing] Speed up the test suite
   HBASE-2041 Change WAL default configuration values
   HBASE-2997 Performance fixes - profiler driven
   HBASE-2450 For single row reads of specific columns, seek to the
              first column in HFiles rather than start of row
              (Pranav via Ryan, some Ryan)

@@ -2615,8 +2615,8 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
   HBASE-1243 oldlogfile.dat is screwed, so is it's region
   HBASE-1169 When a shutdown is requested, stop scanning META regions
              immediately
   HBASE-1251 HConnectionManager.getConnection(HBaseConfiguration) returns
              same HConnection for different HBaseConfigurations
   HBASE-1157, HBASE-1156 If we do not take start code as a part of region
              server recovery, we could inadvertantly try to reassign regions
              assigned to a restarted server with a different start code;
@@ -2675,7 +2675,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
              (Thomas Schneider via Andrew Purtell)
   HBASE-1374 NPE out of ZooKeeperWrapper.loadZooKeeperConfig
   HBASE-1336 Splitting up the compare of family+column into 2 different
              compare
   HBASE-1377 RS address is null in master web UI
   HBASE-1344 WARN IllegalStateException: Cannot set a region as open if it
              has not been pending
@@ -2737,7 +2737,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
              binary comparator (Jon Gray via Stack)
   HBASE-1500 KeyValue$KeyComparator array overrun
   HBASE-1513 Compactions too slow
   HBASE-1516 Investigate if StoreScanner will not return the next row if
              earlied-out of previous row (Jon Gray)
   HBASE-1520 StoreFileScanner catches and ignore IOExceptions from HFile
   HBASE-1522 We delete splits before their time occasionally
@@ -2848,7 +2848,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
              when trying to read
   HBASE-1705 Thrift server: deletes in mutateRow/s don't delete
              (Tim Sell and Ryan Rawson via Stack)
   HBASE-1703 ICVs across /during a flush can cause multiple keys with the
              same TS (bad)
   HBASE-1671 HBASE-1609 broke scanners riding across splits
   HBASE-1717 Put on client-side uses passed-in byte[]s rather than always
@@ -2921,9 +2921,9 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
              (Toby White via Andrew Purtell)
   HBASE-1180 Add missing import statements to SampleUploader and remove
              unnecessary @Overrides (Ryan Smith via Andrew Purtell)
   HBASE-1191 ZooKeeper ensureParentExists calls fail
              on absolute path (Nitay Joffe via Jean-Daniel Cryans)
   HBASE-1187 After disabling/enabling a table, the regions seems to
              be assigned to only 1-2 region servers
   HBASE-1210 Allow truncation of output for scan and get commands in shell
              (Lars George via Stack)
@@ -2955,7 +2955,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
              (Nitay Joffe via Stack)
   HBASE-1285 Forcing compactions should be available via thrift
              (Tim Sell via Stack)
   HBASE-1186 Memory-aware Maps with LRU eviction for cell cache
              (Jonathan Gray via Andrew Purtell)
   HBASE-1205 RegionServers should find new master when a new master comes up
              (Nitay Joffe via Andrew Purtell)
@@ -3033,7 +3033,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
   HBASE-1466 Binary keys are not first class citizens
              (Ryan Rawson via Stack)
   HBASE-1445 Add the ability to start a master from any machine
   HBASE-1474 Add zk attributes to list of attributes
              in master and regionserver UIs
   HBASE-1448 Add a node in ZK to tell all masters to shutdown
   HBASE-1478 Remove hbase master options from shell (Nitay Joffe via Stack)
@@ -3042,7 +3042,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
   HBASE-1490 Update ZooKeeper library
   HBASE-1489 Basic git ignores for people who use git and eclipse
   HBASE-1453 Add HADOOP-4681 to our bundled hadoop, add to 'gettting started'
              recommendation that hbase users backport
   HBASE-1507 iCMS as default JVM
   HBASE-1509 Add explanation to shell "help" command on how to use binarykeys
              (Lars George via Stack)
@@ -3054,7 +3054,7 @@ Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
              on hbase-user traffic
   HBASE-1539 prevent aborts due to missing zoo.cfg
   HBASE-1488 Fix TestThriftServer and re-enable it
   HBASE-1541 Scanning multiple column families in the presence of deleted
              families results in bad scans
   HBASE-1540 Client delete unit test, define behavior
              (Jonathan Gray via Stack)
@@ -3161,13 +3161,13 @@ Release 0.19.0 - 01/21/2009
   HBASE-906 [shell] Truncates output
   HBASE-912 PE is broken when other tables exist
   HBASE-853 [shell] Cannot describe meta tables (Izaak Rubin via Stack)
   HBASE-844 Can't pass script to hbase shell
   HBASE-837 Add unit tests for ThriftServer.HBaseHandler (Izaak Rubin via
              Stack)
   HBASE-913 Classes using log4j directly
   HBASE-914 MSG_REPORT_CLOSE has a byte array for a message
   HBASE-918 Region balancing during startup makes cluster unstable
   HBASE-921 region close and open processed out of order; makes for
              disagreement between master and regionserver on region state
   HBASE-925 HRS NPE on way out if no master to connect to
   HBASE-928 NPE throwing RetriesExhaustedException
@@ -3277,7 +3277,7 @@ Release 0.19.0 - 01/21/2009
              crashed server; regionserver tries to execute incomplete log
   HBASE-1104, HBASE-1098, HBASE-1096: Doubly-assigned regions redux,
              IllegalStateException: Cannot set a region to be closed it it was
              not already marked as closing, Does not recover if HRS carrying
              -ROOT- goes down
   HBASE-1114 Weird NPEs compacting
   HBASE-1116 generated web.xml and svn don't play nice together
@@ -3320,7 +3320,7 @@ Release 0.19.0 - 01/21/2009
   HBASE-949 Add an HBase Manual
   HBASE-839 Update hadoop libs in hbase; move hbase TRUNK on to an hadoop
              0.19.0 RC
   HBASE-785 Remove InfoServer, use HADOOP-3824 StatusHttpServer
              instead (requires hadoop 0.19)
   HBASE-81 When a scanner lease times out, throw a more "user friendly" exception
   HBASE-978 Remove BloomFilterDescriptor. It is no longer used.
@ -3396,7 +3396,7 @@ Release 0.18.0 - September 21st, 2008
|
||||||
BUG FIXES
|
BUG FIXES
|
||||||
HBASE-881 Fixed bug when Master tries to reassign split or offline regions
|
HBASE-881 Fixed bug when Master tries to reassign split or offline regions
|
||||||
from a dead server
|
from a dead server
|
||||||
HBASE-860 Fixed Bug in IndexTableReduce where it concerns writing lucene
|
HBASE-860 Fixed Bug in IndexTableReduce where it concerns writing lucene
|
||||||
index fields.
|
index fields.
|
||||||
HBASE-805 Remove unnecessary getRow overloads in HRS (Jonathan Gray via
|
HBASE-805 Remove unnecessary getRow overloads in HRS (Jonathan Gray via
|
||||||
Jim Kellerman) (Fix whitespace diffs in HRegionServer)
|
Jim Kellerman) (Fix whitespace diffs in HRegionServer)
|
||||||
|
@ -3504,8 +3504,8 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HBASE-487 Replace hql w/ a hbase-friendly jirb or jython shell
|
HBASE-487 Replace hql w/ a hbase-friendly jirb or jython shell
|
||||||
Part 1: purge of hql and added raw jirb in its place.
|
Part 1: purge of hql and added raw jirb in its place.
|
||||||
HBASE-521 Improve client scanner interface
|
HBASE-521 Improve client scanner interface
|
||||||
HBASE-288 Add in-memory caching of data. Required update of hadoop to
|
HBASE-288 Add in-memory caching of data. Required update of hadoop to
|
||||||
0.17.0-dev.2008-02-07_12-01-58. (Tom White via Stack)
|
0.17.0-dev.2008-02-07_12-01-58. (Tom White via Stack)
|
||||||
HBASE-696 Make bloomfilter true/false and self-sizing
|
HBASE-696 Make bloomfilter true/false and self-sizing
|
||||||
HBASE-720 clean up inconsistencies around deletes (Izaak Rubin via Stack)
|
HBASE-720 clean up inconsistencies around deletes (Izaak Rubin via Stack)
|
||||||
HBASE-796 Deprecates Text methods from HTable
|
HBASE-796 Deprecates Text methods from HTable
|
||||||
|
@ -3577,7 +3577,7 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HBASE-715 Base HBase 0.2 on Hadoop 0.17.1
|
HBASE-715 Base HBase 0.2 on Hadoop 0.17.1
|
||||||
HBASE-718 hbase shell help info
|
HBASE-718 hbase shell help info
|
||||||
HBASE-717 alter table broke with new shell returns InvalidColumnNameException
|
HBASE-717 alter table broke with new shell returns InvalidColumnNameException
|
||||||
HBASE-573 HBase does not read hadoop-*.xml for dfs configuration after
|
HBASE-573 HBase does not read hadoop-*.xml for dfs configuration after
|
||||||
moving out hadoop/contrib
|
moving out hadoop/contrib
|
||||||
HBASE-11 Unexpected exits corrupt DFS
|
HBASE-11 Unexpected exits corrupt DFS
|
||||||
HBASE-12 When hbase regionserver restarts, it says "impossible state for
|
HBASE-12 When hbase regionserver restarts, it says "impossible state for
|
||||||
|
@ -3632,7 +3632,7 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HBASE-8 Delete table does not remove the table directory in the FS
|
HBASE-8 Delete table does not remove the table directory in the FS
|
||||||
HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
|
HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
|
||||||
that reach the client even after retries
|
that reach the client even after retries
|
||||||
HBASE-460 TestMigrate broken when HBase moved to subproject
|
HBASE-460 TestMigrate broken when HBase moved to subproject
|
||||||
HBASE-462 Update migration tool
|
HBASE-462 Update migration tool
|
||||||
HBASE-473 When a table is deleted, master sends multiple close messages to
|
HBASE-473 When a table is deleted, master sends multiple close messages to
|
||||||
the region server
|
the region server
|
||||||
|
@ -3656,7 +3656,7 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HBASE-537 Wait for hdfs to exit safe mode
|
HBASE-537 Wait for hdfs to exit safe mode
|
||||||
HBASE-476 RegexpRowFilter behaves incorectly when there are multiple store
|
HBASE-476 RegexpRowFilter behaves incorectly when there are multiple store
|
||||||
files (Clint Morgan via Jim Kellerman)
|
files (Clint Morgan via Jim Kellerman)
|
||||||
HBASE-527 RegexpRowFilter does not work when there are columns from
|
HBASE-527 RegexpRowFilter does not work when there are columns from
|
||||||
multiple families (Clint Morgan via Jim Kellerman)
|
multiple families (Clint Morgan via Jim Kellerman)
|
||||||
HBASE-534 Double-assignment at SPLIT-time
|
HBASE-534 Double-assignment at SPLIT-time
|
||||||
HBASE-712 midKey found compacting is the first, not necessarily the optimal
|
HBASE-712 midKey found compacting is the first, not necessarily the optimal
|
||||||
|
@ -3721,13 +3721,13 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HBASE-790 During import, single region blocks requests for >10 minutes,
|
HBASE-790 During import, single region blocks requests for >10 minutes,
|
||||||
thread dumps, throws out pending requests, and continues
|
thread dumps, throws out pending requests, and continues
|
||||||
(Jonathan Gray via Stack)
|
(Jonathan Gray via Stack)
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HBASE-559 MR example job to count table rows
|
HBASE-559 MR example job to count table rows
|
||||||
HBASE-596 DemoClient.py (Ivan Begtin via Stack)
|
HBASE-596 DemoClient.py (Ivan Begtin via Stack)
|
||||||
HBASE-581 Allow adding filters to TableInputFormat (At same time, ensure TIF
|
HBASE-581 Allow adding filters to TableInputFormat (At same time, ensure TIF
|
||||||
is subclassable) (David Alves via Stack)
|
is subclassable) (David Alves via Stack)
|
||||||
HBASE-603 When an exception bubbles out of getRegionServerWithRetries, wrap
|
HBASE-603 When an exception bubbles out of getRegionServerWithRetries, wrap
|
||||||
the exception with a RetriesExhaustedException
|
the exception with a RetriesExhaustedException
|
||||||
HBASE-600 Filters have excessive DEBUG logging
|
HBASE-600 Filters have excessive DEBUG logging
|
||||||
HBASE-611 regionserver should do basic health check before reporting
|
HBASE-611 regionserver should do basic health check before reporting
|
||||||
|
@ -3789,7 +3789,7 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HMaster (Bryan Duxbury via Stack)
|
HMaster (Bryan Duxbury via Stack)
|
||||||
HBASE-440 Add optional log roll interval so that log files are garbage
|
HBASE-440 Add optional log roll interval so that log files are garbage
|
||||||
collected
|
collected
|
||||||
HBASE-407 Keep HRegionLocation information in LRU structure
|
HBASE-407 Keep HRegionLocation information in LRU structure
|
||||||
HBASE-444 hbase is very slow at determining table is not present
|
HBASE-444 hbase is very slow at determining table is not present
|
||||||
HBASE-438 XMLOutputter state should be initialized.
|
HBASE-438 XMLOutputter state should be initialized.
|
||||||
HBASE-414 Move client classes into client package
|
HBASE-414 Move client classes into client package
|
||||||
|
@ -3801,7 +3801,7 @@ Release 0.2.0 - August 8, 2008.
|
||||||
HBASE-464 HBASE-419 introduced javadoc errors
|
HBASE-464 HBASE-419 introduced javadoc errors
|
||||||
HBASE-468 Move HStoreKey back to o.a.h.h
|
HBASE-468 Move HStoreKey back to o.a.h.h
|
||||||
HBASE-442 Move internal classes out of HRegionServer
|
HBASE-442 Move internal classes out of HRegionServer
|
||||||
HBASE-466 Move HMasterInterface, HRegionInterface, and
|
HBASE-466 Move HMasterInterface, HRegionInterface, and
|
||||||
HMasterRegionInterface into o.a.h.h.ipc
|
HMasterRegionInterface into o.a.h.h.ipc
|
||||||
HBASE-479 Speed up TestLogRolling
|
HBASE-479 Speed up TestLogRolling
|
||||||
HBASE-480 Tool to manually merge two regions
|
HBASE-480 Tool to manually merge two regions
|
||||||
|
@ -3851,7 +3851,7 @@ Release 0.2.0 - August 8, 2008.
|
||||||
timestamps
|
timestamps
|
||||||
HBASE-511 Do exponential backoff in clients on NSRE, WRE, ISE, etc.
|
HBASE-511 Do exponential backoff in clients on NSRE, WRE, ISE, etc.
|
||||||
(Andrew Purtell via Jim Kellerman)
|
(Andrew Purtell via Jim Kellerman)
|
||||||
|
|
||||||
OPTIMIZATIONS
|
OPTIMIZATIONS
|
||||||
HBASE-430 Performance: Scanners and getRow return maps with duplicate data
|
HBASE-430 Performance: Scanners and getRow return maps with duplicate data
|
||||||
|
|
||||||
|
@ -3867,7 +3867,7 @@ Release 0.1.3 - 07/25/2008
|
||||||
HBASE-648 If mapfile index is empty, run repair
|
HBASE-648 If mapfile index is empty, run repair
|
||||||
HBASE-659 HLog#cacheFlushLock not cleared; hangs a region
|
HBASE-659 HLog#cacheFlushLock not cleared; hangs a region
|
||||||
HBASE-663 Incorrect sequence number for cache flush
|
HBASE-663 Incorrect sequence number for cache flush
|
||||||
HBASE-652 Dropping table fails silently if table isn't disabled
|
HBASE-652 Dropping table fails silently if table isn't disabled
|
||||||
HBASE-674 Memcache size unreliable
|
HBASE-674 Memcache size unreliable
|
||||||
HBASE-665 server side scanner doesn't honor stop row
|
HBASE-665 server side scanner doesn't honor stop row
|
||||||
HBASE-681 NPE in Memcache (Clint Morgan via Jim Kellerman)
|
HBASE-681 NPE in Memcache (Clint Morgan via Jim Kellerman)
|
||||||
|
@ -3918,7 +3918,7 @@ Release 0.1.2 - 05/13/2008
|
||||||
HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
|
HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
|
||||||
HBASE-619 Fix 'logs' link in UI
|
HBASE-619 Fix 'logs' link in UI
|
||||||
HBASE-620 testmergetool failing in branch and trunk since hbase-618 went in
|
HBASE-620 testmergetool failing in branch and trunk since hbase-618 went in
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HBASE-559 MR example job to count table rows
|
HBASE-559 MR example job to count table rows
|
||||||
HBASE-578 Upgrade branch to 0.16.3 hadoop.
|
HBASE-578 Upgrade branch to 0.16.3 hadoop.
|
||||||
|
@ -3952,7 +3952,7 @@ Release 0.1.1 - 04/11/2008
|
||||||
Release 0.1.0
|
Release 0.1.0
|
||||||
|
|
||||||
INCOMPATIBLE CHANGES
|
INCOMPATIBLE CHANGES
|
||||||
HADOOP-2750 Deprecated methods startBatchUpdate, commitBatch, abortBatch,
|
HADOOP-2750 Deprecated methods startBatchUpdate, commitBatch, abortBatch,
|
||||||
and renewLease have been removed from HTable (Bryan Duxbury via
|
and renewLease have been removed from HTable (Bryan Duxbury via
|
||||||
Jim Kellerman)
|
Jim Kellerman)
|
||||||
HADOOP-2786 Move hbase out of hadoop core
|
HADOOP-2786 Move hbase out of hadoop core
|
||||||
|
@ -3961,7 +3961,7 @@ Release 0.1.0
|
||||||
with a hbase from 0.16.0
|
with a hbase from 0.16.0
|
||||||
|
|
||||||
NEW FEATURES
|
NEW FEATURES
|
||||||
HBASE-506 When an exception has to escape ServerCallable due to exhausted retries,
|
HBASE-506 When an exception has to escape ServerCallable due to exhausted retries,
|
||||||
show all the exceptions that lead to this situation
|
show all the exceptions that lead to this situation
|
||||||
|
|
||||||
OPTIMIZATIONS
|
OPTIMIZATIONS
|
||||||
|
@ -3997,7 +3997,7 @@ Release 0.1.0
|
||||||
HBASE-514 table 'does not exist' when it does
|
HBASE-514 table 'does not exist' when it does
|
||||||
HBASE-537 Wait for hdfs to exit safe mode
|
HBASE-537 Wait for hdfs to exit safe mode
|
||||||
HBASE-534 Double-assignment at SPLIT-time
|
HBASE-534 Double-assignment at SPLIT-time
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid
|
HADOOP-2555 Refactor the HTable#get and HTable#getRow methods to avoid
|
||||||
repetition of retry-on-failure logic (thanks to Peter Dolan and
|
repetition of retry-on-failure logic (thanks to Peter Dolan and
|
||||||
|
@ -4006,22 +4006,22 @@ Release 0.1.0
|
||||||
HBASE-480 Tool to manually merge two regions
|
HBASE-480 Tool to manually merge two regions
|
||||||
HBASE-477 Add support for an HBASE_CLASSPATH
|
HBASE-477 Add support for an HBASE_CLASSPATH
|
||||||
HBASE-515 At least double default timeouts between regionserver and master
|
HBASE-515 At least double default timeouts between regionserver and master
|
||||||
HBASE-482 package-level javadoc should have example client or at least
|
HBASE-482 package-level javadoc should have example client or at least
|
||||||
point at the FAQ
|
point at the FAQ
|
||||||
HBASE-497 RegionServer needs to recover if datanode goes down
|
HBASE-497 RegionServer needs to recover if datanode goes down
|
||||||
HBASE-456 Clearly state which ports need to be opened in order to run HBase
|
HBASE-456 Clearly state which ports need to be opened in order to run HBase
|
||||||
HBASE-483 Merge tool won't merge two overlapping regions
|
HBASE-483 Merge tool won't merge two overlapping regions
|
||||||
HBASE-476 RegexpRowFilter behaves incorectly when there are multiple store
|
HBASE-476 RegexpRowFilter behaves incorectly when there are multiple store
|
||||||
files (Clint Morgan via Jim Kellerman)
|
files (Clint Morgan via Jim Kellerman)
|
||||||
HBASE-527 RegexpRowFilter does not work when there are columns from
|
HBASE-527 RegexpRowFilter does not work when there are columns from
|
||||||
multiple families (Clint Morgan via Jim Kellerman)
|
multiple families (Clint Morgan via Jim Kellerman)
|
||||||
|
|
||||||
Release 0.16.0
|
Release 0.16.0
|
||||||
|
|
||||||
2008/02/04 HBase is now a subproject of Hadoop. The first HBase release as
|
2008/02/04 HBase is now a subproject of Hadoop. The first HBase release as
|
||||||
a subproject will be release 0.1.0 which will be equivalent to
|
a subproject will be release 0.1.0 which will be equivalent to
|
||||||
the version of HBase included in Hadoop 0.16.0. In order to
|
the version of HBase included in Hadoop 0.16.0. In order to
|
||||||
accomplish this, the HBase portion of HBASE-288 (formerly
|
accomplish this, the HBase portion of HBASE-288 (formerly
|
||||||
HADOOP-1398) has been backed out. Once 0.1.0 is frozen (depending
|
HADOOP-1398) has been backed out. Once 0.1.0 is frozen (depending
|
||||||
mostly on changes to infrastructure due to becoming a sub project
|
mostly on changes to infrastructure due to becoming a sub project
|
||||||
instead of a contrib project), this patch will re-appear on HBase
|
instead of a contrib project), this patch will re-appear on HBase
|
||||||
|
@ -4030,7 +4030,7 @@ Release 0.16.0
|
||||||
INCOMPATIBLE CHANGES
|
INCOMPATIBLE CHANGES
|
||||||
HADOOP-2056 A table with row keys containing colon fails to split regions
|
HADOOP-2056 A table with row keys containing colon fails to split regions
|
||||||
HADOOP-2079 Fix generated HLog, HRegion names
|
HADOOP-2079 Fix generated HLog, HRegion names
|
||||||
HADOOP-2495 Minor performance improvements: Slim-down BatchOperation, etc.
|
HADOOP-2495 Minor performance improvements: Slim-down BatchOperation, etc.
|
||||||
HADOOP-2506 Remove the algebra package
|
HADOOP-2506 Remove the algebra package
|
||||||
HADOOP-2519 Performance improvements: Customized RPC serialization
|
HADOOP-2519 Performance improvements: Customized RPC serialization
|
||||||
HADOOP-2478 Restructure how HBase lays out files in the file system (phase 1)
|
HADOOP-2478 Restructure how HBase lays out files in the file system (phase 1)
|
||||||
|
@ -4155,7 +4155,7 @@ Release 0.16.0
|
||||||
TableNotFoundException when a different table has been created
|
TableNotFoundException when a different table has been created
|
||||||
previously (Bryan Duxbury via Stack)
|
previously (Bryan Duxbury via Stack)
|
||||||
HADOOP-2587 Splits blocked by compactions cause region to be offline for
|
HADOOP-2587 Splits blocked by compactions cause region to be offline for
|
||||||
duration of compaction.
|
duration of compaction.
|
||||||
HADOOP-2592 Scanning, a region can let out a row that its not supposed
|
HADOOP-2592 Scanning, a region can let out a row that its not supposed
|
||||||
to have
|
to have
|
||||||
HADOOP-2493 hbase will split on row when the start and end row is the
|
HADOOP-2493 hbase will split on row when the start and end row is the
|
||||||
|
@ -4188,7 +4188,7 @@ Release 0.16.0
|
||||||
table or table you are enumerating isn't the first table
|
table or table you are enumerating isn't the first table
|
||||||
Delete empty file: src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/
|
Delete empty file: src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/
|
||||||
TableOutputCollector.java per Nigel Daley
|
TableOutputCollector.java per Nigel Daley
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HADOOP-2401 Add convenience put method that takes writable
|
HADOOP-2401 Add convenience put method that takes writable
|
||||||
(Johan Oskarsson via Stack)
|
(Johan Oskarsson via Stack)
|
||||||
|
@ -4230,7 +4230,7 @@ Release 0.16.0
|
||||||
HADOOP-2351 If select command returns no result, it doesn't need to show the
|
HADOOP-2351 If select command returns no result, it doesn't need to show the
|
||||||
header information (Edward Yoon via Stack)
|
header information (Edward Yoon via Stack)
|
||||||
HADOOP-2285 Add being able to shutdown regionservers (Dennis Kubes via Stack)
|
HADOOP-2285 Add being able to shutdown regionservers (Dennis Kubes via Stack)
|
||||||
HADOOP-2458 HStoreFile.writeSplitInfo should just call
|
HADOOP-2458 HStoreFile.writeSplitInfo should just call
|
||||||
HStoreFile.Reference.write
|
HStoreFile.Reference.write
|
||||||
HADOOP-2471 Add reading/writing MapFile to PerformanceEvaluation suite
|
HADOOP-2471 Add reading/writing MapFile to PerformanceEvaluation suite
|
||||||
HADOOP-2522 Separate MapFile benchmark from PerformanceEvaluation
|
HADOOP-2522 Separate MapFile benchmark from PerformanceEvaluation
|
||||||
|
@ -4250,7 +4250,7 @@ Release 0.16.0
|
||||||
HADOOP-2616 hbase not spliting when the total size of region reaches max
|
HADOOP-2616 hbase not spliting when the total size of region reaches max
|
||||||
region size * 1.5
|
region size * 1.5
|
||||||
HADOOP-2643 Make migration tool smarter.
|
HADOOP-2643 Make migration tool smarter.
|
||||||
|
|
||||||
Release 0.15.1
|
Release 0.15.1
|
||||||
Branch 0.15
|
Branch 0.15
|
||||||
|
|
||||||
|
@ -4318,9 +4318,9 @@ Branch 0.15
|
||||||
HADOOP-1975 HBase tests failing with java.lang.NumberFormatException
|
HADOOP-1975 HBase tests failing with java.lang.NumberFormatException
|
||||||
HADOOP-1990 Regression test instability affects nightly and patch builds
|
HADOOP-1990 Regression test instability affects nightly and patch builds
|
||||||
HADOOP-1996 TestHStoreFile fails on windows if run multiple times
|
HADOOP-1996 TestHStoreFile fails on windows if run multiple times
|
||||||
HADOOP-1937 When the master times out a region server's lease, it is too
|
HADOOP-1937 When the master times out a region server's lease, it is too
|
||||||
aggressive in reclaiming the server's log.
|
aggressive in reclaiming the server's log.
|
||||||
HADOOP-2004 webapp hql formatting bugs
|
HADOOP-2004 webapp hql formatting bugs
|
||||||
HADOOP_2011 Make hbase daemon scripts take args in same order as hadoop
|
HADOOP_2011 Make hbase daemon scripts take args in same order as hadoop
|
||||||
daemon scripts
|
daemon scripts
|
||||||
HADOOP-2017 TestRegionServerAbort failure in patch build #903 and
|
HADOOP-2017 TestRegionServerAbort failure in patch build #903 and
|
||||||
|
@ -4339,7 +4339,7 @@ Branch 0.15
|
||||||
HADOOP-1794 Remove deprecated APIs
|
HADOOP-1794 Remove deprecated APIs
|
||||||
HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode'
|
HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode'
|
||||||
HADOOP-1833 bin/stop_hbase.sh returns before it completes
|
HADOOP-1833 bin/stop_hbase.sh returns before it completes
|
||||||
(Izaak Rubin via Stack)
|
(Izaak Rubin via Stack)
|
||||||
HADOOP-1835 Updated Documentation for HBase setup/installation
|
HADOOP-1835 Updated Documentation for HBase setup/installation
|
||||||
(Izaak Rubin via Stack)
|
(Izaak Rubin via Stack)
|
||||||
HADOOP-1868 Make default configuration more responsive
|
HADOOP-1868 Make default configuration more responsive
|
||||||
|
@ -4358,13 +4358,13 @@ Below are the list of changes before 2007-08-18
|
||||||
1. HADOOP-1384. HBase omnibus patch. (jimk, Vuk Ercegovac, and Michael Stack)
|
1. HADOOP-1384. HBase omnibus patch. (jimk, Vuk Ercegovac, and Michael Stack)
|
||||||
2. HADOOP-1402. Fix javadoc warnings in hbase contrib. (Michael Stack)
|
2. HADOOP-1402. Fix javadoc warnings in hbase contrib. (Michael Stack)
|
||||||
3. HADOOP-1404. HBase command-line shutdown failing (Michael Stack)
|
3. HADOOP-1404. HBase command-line shutdown failing (Michael Stack)
|
||||||
4. HADOOP-1397. Replace custom hbase locking with
|
4. HADOOP-1397. Replace custom hbase locking with
|
||||||
java.util.concurrent.locks.ReentrantLock (Michael Stack)
|
java.util.concurrent.locks.ReentrantLock (Michael Stack)
|
||||||
5. HADOOP-1403. HBase reliability - make master and region server more fault
|
5. HADOOP-1403. HBase reliability - make master and region server more fault
|
||||||
tolerant.
|
tolerant.
|
||||||
6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do
|
6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do
|
||||||
'Performance Evaluation', etc.
|
'Performance Evaluation', etc.
|
||||||
7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed
|
7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed
|
||||||
class HLocking.
|
class HLocking.
|
||||||
8. HADOOP-1424. TestHBaseCluster fails with IllegalMonitorStateException. Fix
|
8. HADOOP-1424. TestHBaseCluster fails with IllegalMonitorStateException. Fix
|
||||||
regression introduced by HADOOP-1397.
|
regression introduced by HADOOP-1397.
|
||||||
|
@ -4378,7 +4378,7 @@ Below are the list of changes before 2007-08-18
|
||||||
14. HADOOP-1460 On shutdown IOException with complaint 'Cannot cancel lease
|
14. HADOOP-1460 On shutdown IOException with complaint 'Cannot cancel lease
|
||||||
that is not held'
|
that is not held'
|
||||||
15. HADOOP-1421 Failover detection, split log files.
|
15. HADOOP-1421 Failover detection, split log files.
|
||||||
For the files modified, also clean up javadoc, class, field and method
|
For the files modified, also clean up javadoc, class, field and method
|
||||||
visibility (HADOOP-1466)
|
visibility (HADOOP-1466)
|
||||||
16. HADOOP-1479 Fix NPE in HStore#get if store file only has keys < passed key.
|
16. HADOOP-1479 Fix NPE in HStore#get if store file only has keys < passed key.
|
||||||
17. HADOOP-1476 Distributed version of 'Performance Evaluation' script
|
17. HADOOP-1476 Distributed version of 'Performance Evaluation' script
|
||||||
|
@ -4397,13 +4397,13 @@ Below are the list of changes before 2007-08-18
|
||||||
26. HADOOP-1543 [hbase] Add HClient.tableExists
|
26. HADOOP-1543 [hbase] Add HClient.tableExists
|
||||||
27. HADOOP-1519 [hbase] map/reduce interface for HBase. (Vuk Ercegovac and
|
27. HADOOP-1519 [hbase] map/reduce interface for HBase. (Vuk Ercegovac and
|
||||||
Jim Kellerman)
|
Jim Kellerman)
|
||||||
28. HADOOP-1523 Hung region server waiting on write locks
|
28. HADOOP-1523 Hung region server waiting on write locks
|
||||||
29. HADOOP-1560 NPE in MiniHBaseCluster on Windows
|
29. HADOOP-1560 NPE in MiniHBaseCluster on Windows
|
||||||
30. HADOOP-1531 Add RowFilter to HRegion.HScanner
|
30. HADOOP-1531 Add RowFilter to HRegion.HScanner
|
||||||
Adds a row filtering interface and two implemenentations: A page scanner,
|
Adds a row filtering interface and two implemenentations: A page scanner,
|
||||||
and a regex row/column-data matcher. (James Kennedy via Stack)
|
and a regex row/column-data matcher. (James Kennedy via Stack)
|
||||||
31. HADOOP-1566 Key-making utility
|
31. HADOOP-1566 Key-making utility
|
||||||
32. HADOOP-1415 Provide configurable per-column bloom filters.
|
32. HADOOP-1415 Provide configurable per-column bloom filters.
|
||||||
HADOOP-1466 Clean up visibility and javadoc issues in HBase.
|
HADOOP-1466 Clean up visibility and javadoc issues in HBase.
|
||||||
33. HADOOP-1538 Provide capability for client specified time stamps in HBase
|
33. HADOOP-1538 Provide capability for client specified time stamps in HBase
|
||||||
HADOOP-1466 Clean up visibility and javadoc issues in HBase.
|
HADOOP-1466 Clean up visibility and javadoc issues in HBase.
|
||||||
|
@ -4417,7 +4417,7 @@ Below are the list of changes before 2007-08-18
|
||||||
41. HADOOP-1614 [hbase] HClient does not protect itself from simultaneous updates
|
41. HADOOP-1614 [hbase] HClient does not protect itself from simultaneous updates
|
||||||
42. HADOOP-1468 Add HBase batch update to reduce RPC overhead
|
42. HADOOP-1468 Add HBase batch update to reduce RPC overhead
|
||||||
43. HADOOP-1616 Sporadic TestTable failures
|
43. HADOOP-1616 Sporadic TestTable failures
|
||||||
44. HADOOP-1615 Replacing thread notification-based queue with
|
44. HADOOP-1615 Replacing thread notification-based queue with
|
||||||
java.util.concurrent.BlockingQueue in HMaster, HRegionServer
|
java.util.concurrent.BlockingQueue in HMaster, HRegionServer
|
||||||
45. HADOOP-1606 Updated implementation of RowFilterSet, RowFilterInterface
|
45. HADOOP-1606 Updated implementation of RowFilterSet, RowFilterInterface
|
||||||
(Izaak Rubin via Stack)
|
(Izaak Rubin via Stack)
|
||||||
|
@ -4438,10 +4438,10 @@ Below are the list of changes before 2007-08-18
|
||||||
53. HADOOP-1528 HClient for multiple tables - expose close table function
|
53. HADOOP-1528 HClient for multiple tables - expose close table function
|
||||||
54. HADOOP-1466 Clean up warnings, visibility and javadoc issues in HBase.
|
54. HADOOP-1466 Clean up warnings, visibility and javadoc issues in HBase.
|
||||||
55. HADOOP-1662 Make region splits faster
|
55. HADOOP-1662 Make region splits faster
|
||||||
56. HADOOP-1678 On region split, master should designate which host should
|
56. HADOOP-1678 On region split, master should designate which host should
|
||||||
serve daughter splits. Phase 1: Master balances load for new regions and
|
serve daughter splits. Phase 1: Master balances load for new regions and
|
||||||
when a region server fails.
|
when a region server fails.
|
||||||
57. HADOOP-1678 On region split, master should designate which host should
|
57. HADOOP-1678 On region split, master should designate which host should
|
||||||
serve daughter splits. Phase 2: Master assigns children of split region
|
serve daughter splits. Phase 2: Master assigns children of split region
|
||||||
instead of HRegionServer serving both children.
|
instead of HRegionServer serving both children.
|
||||||
58. HADOOP-1710 All updates should be batch updates
|
58. HADOOP-1710 All updates should be batch updates
|
||||||
|
|
|
@@ -17,7 +17,7 @@
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#

usage="Usage: considerAsDead.sh --hostname serverName"

@@ -50,12 +50,12 @@ do
rs_parts=(${rs//,/ })
hostname=${rs_parts[0]}
echo $deadhost
echo $hostname
if [ "$deadhost" == "$hostname" ]; then
znode="$zkrs/$rs"
echo "ZNode Deleting:" $znode
$bin/hbase zkcli delete $znode > /dev/null 2>&1
sleep 1
ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /"
fi
done
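The considerAsDead.sh hunk above matches the --hostname argument against each entry in the regionservers list and, on a match, deletes that server's znode through "hbase zkcli" and runs the recovery command on the host over SSH. A minimal usage sketch, assuming the script sits in bin/ next to the other helper scripts; the hostname is illustrative only:

    # Hypothetical invocation; the hostname is an example, not from the commit:
    ./bin/considerAsDead.sh --hostname rs1.example.com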
@@ -74,7 +74,7 @@ check_for_znodes() {
znodes=`"$bin"/hbase zkcli ls $zparent/$zchild 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
if [ "$znodes" != "" ]; then
echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without cleaning hbase data."
echo #force a newline
exit 1;
else
echo -n "All ZNode(s) of $command are expired."

@@ -99,7 +99,7 @@ execute_clean_acls() {

clean_up() {
case $1 in
--cleanZk)
execute_zk_command "deleteall ${zparent}";
;;
--cleanHdfs)

@@ -120,7 +120,7 @@ clean_up() {
;;
*)
;;
esac
}

check_znode_exists() {
@@ -103,7 +103,7 @@ do
break
fi
done

# Allow alternate hbase conf dir location.
HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}"
# List of hbase regions servers.
@@ -17,7 +17,7 @@
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#
# Run a shell command on all backup master hosts.
#
# Environment Variables

@@ -45,7 +45,7 @@ bin=`cd "$bin">/dev/null; pwd`
. "$bin"/hbase-config.sh

# If the master backup file is specified in the command line,
# then it takes precedence over the definition in
# hbase-env.sh. Save it here.
HOSTLIST=$HBASE_BACKUP_MASTERS

@@ -69,6 +69,6 @@ if [ -f $HOSTLIST ]; then
sleep $HBASE_SLAVE_SLEEP
fi
done
fi

wait
@@ -17,7 +17,7 @@
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#
# Run a shell command on all regionserver hosts.
#
# Environment Variables

@@ -45,7 +45,7 @@ bin=`cd "$bin">/dev/null; pwd`
. "$bin"/hbase-config.sh

# If the regionservers file is specified in the command line,
# then it takes precedence over the definition in
# hbase-env.sh. Save it here.
HOSTLIST=$HBASE_REGIONSERVERS
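The two hunks above touch the wrapper scripts that fan a shell command out to every backup master or regionserver listed in the hosts file (HBASE_BACKUP_MASTERS and HBASE_REGIONSERVERS; a file named on the command line takes precedence over the hbase-env.sh definition). A usage sketch, assuming the stock bin/ layout and a populated conf/regionservers file; the command to run is only an example:

    # Run an example command on every host listed in conf/regionservers:
    ./bin/regionservers.sh uptime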
@@ -52,7 +52,7 @@ fi

export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-master-$HOSTNAME
export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log
logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out
loglog="${HBASE_LOG_DIR}/${HBASE_LOGFILE}"
pid=${HBASE_PID_DIR:-/tmp}/hbase-$HBASE_IDENT_STRING-master.pid

@@ -74,7 +74,7 @@ fi
# distributed == false means that the HMaster will kill ZK when it exits
# HBASE-6504 - only take the first line of the output in case verbose gc is on
distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
if [ "$distMode" == 'true' ]
then
"$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" stop zookeeper
fi
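The second hunk reads hbase.cluster.distributed through HBaseConfTool and only asks hbase-daemons.sh to stop ZooKeeper when the cluster really is distributed, since the comment notes that a non-distributed HMaster kills its own ZooKeeper on exit. The same lookup can be reproduced by hand; a sketch assuming the default ./conf directory:

    # Print the effective value of hbase.cluster.distributed, first line only
    # (mirrors the distMode line above, in case verbose GC output is interleaved):
    ./bin/hbase --config ./conf org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1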
@@ -68,7 +68,7 @@ while [ $# -ne 0 ]; do
-h|--help)
print_usage ;;
--kill)
IS_KILL=1
cmd_specified ;;
--show)
IS_SHOW=1

@@ -106,5 +106,3 @@ else
echo "No command specified" >&2
exit 1
fi

@@ -17,7 +17,7 @@
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#
# Run a shell command on all zookeeper hosts.
#
# Environment Variables
@@ -33,7 +33,7 @@
# The maximum amount of heap to use. Default is left to JVM default.
# export HBASE_HEAPSIZE=1G

# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
# offheap, set the value to "8G". See http://hbase.apache.org/book.html#direct.memory
# in the refguide for guidance setting this config.
# export HBASE_OFFHEAPSIZE=1G

@@ -71,7 +71,7 @@
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"

# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
# needed setting up off-heap block caching.

# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.

@@ -102,7 +102,7 @@
# Where log files are stored. $HBASE_HOME/logs by default.
# export HBASE_LOG_DIR=${HBASE_HOME}/logs

# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"

@@ -126,13 +126,13 @@
# Tell HBase whether it should manage it's own instance of ZooKeeper or not.
# export HBASE_MANAGES_ZK=true

# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
# RFA appender. Please refer to the log4j2.properties file to see more details on this appender.
# In case one needs to do log rolling on a date change, one should set the environment property
# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
# For example:
# export HBASE_ROOT_LOGGER=INFO,DRFA
# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.

# Tell HBase whether it should include Hadoop's lib when start up,
@@ -24,20 +24,20 @@
<property>
<name>security.client.protocol.acl</name>
<value>*</value>
<description>ACL for ClientProtocol and AdminProtocol implementations (ie.
clients talking to HRegionServers)
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>

<property>
<name>security.admin.protocol.acl</name>
<value>*</value>
<description>ACL for HMasterInterface protocol implementation (ie.
clients talking to HMaster for admin operations).
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>

@@ -46,8 +46,8 @@
<value>*</value>
<description>ACL for HMasterRegionInterface protocol implementations
(for HRegionServers communicating with HMaster)
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
</configuration>
@@ -38,4 +38,4 @@ ${type_declaration}</template><template autoinsert="true" context="classbody_con
</template><template autoinsert="true" context="catchblock_context" deleted="true" description="Code in new catch blocks" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.catchblock" name="catchblock">// ${todo} Auto-generated catch block
${exception_var}.printStackTrace();</template><template autoinsert="false" context="methodbody_context" deleted="true" description="Code in created method stubs" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.methodbody" name="methodbody">// ${todo} Implement ${enclosing_type}.${enclosing_method}
${body_statement}</template><template autoinsert="false" context="constructorbody_context" deleted="true" description="Code in created constructor stubs" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.constructorbody" name="constructorbody">${body_statement}
// ${todo} Implement constructor</template><template autoinsert="true" context="getterbody_context" deleted="true" description="Code in created getters" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.getterbody" name="getterbody">return ${field};</template><template autoinsert="true" context="setterbody_context" deleted="true" description="Code in created setters" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.setterbody" name="setterbody">${field} = ${param};</template></templates>
@@ -87,7 +87,7 @@ these personalities; a pre-packaged personality can be selected via the
`--project` parameter. There is a provided HBase personality in Yetus, however
the HBase project maintains its own within the HBase source repository. Specify
the path to the personality file using `--personality`. The HBase repository
places this file under `dev-support/hbase-personality.sh`.

## Docker mode
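As a concrete illustration of the paragraph above, a precommit run that uses the in-repo personality might be launched as follows; only the `--personality` flag and the `dev-support/hbase-personality.sh` path come from this document, while the `test-patch.sh` entry point and the patch argument are assumptions:

    # Hypothetical Yetus precommit invocation; entry point and patch argument
    # are assumptions, the personality flag and path are from the text above:
    test-patch.sh --personality=dev-support/hbase-personality.sh HBASE-12345.patch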
@@ -141,7 +141,7 @@ Interactions with Jira:

This invocation will build a "simple" database, correlating commits to
branches. It omits gathering the detailed release tag data, so it runs pretty
quickly.

Example Run:

@@ -344,53 +344,53 @@ EOF

echo "writing out example TSV to example.tsv"
cat >"${working_dir}/example.tsv" <<EOF
row1 value8 value8
row3 value2
row2 value9
row10 value1
pow1 value8 value8
pow3 value2
pow2 value9
pow10 value1
paw1 value8 value8
paw3 value2
paw2 value9
paw10 value1
raw1 value8 value8
raw3 value2
raw2 value9
raw10 value1
aow1 value8 value8
aow3 value2
aow2 value9
aow10 value1
aaw1 value8 value8
aaw3 value2
aaw2 value9
aaw10 value1
how1 value8 value8
how3 value2
how2 value9
how10 value1
zow1 value8 value8
zow3 value2
zow2 value9
zow10 value1
zaw1 value8 value8
zaw3 value2
zaw2 value9
zaw10 value1
haw1 value8 value8
haw3 value2
haw2 value9
haw10 value1
low1 value8 value8
low3 value2
low2 value9
low10 value1
law1 value8 value8
law3 value2
law2 value9
law10 value1
EOF

@ -53,7 +53,7 @@ runAllTests=0
|
||||||
|
|
||||||
#set to 1 to replay the failed tests. Previous reports are kept in
|
#set to 1 to replay the failed tests. Previous reports are kept in
|
||||||
# fail_ files
|
# fail_ files
|
||||||
replayFailed=0
|
replayFailed=0
|
||||||
|
|
||||||
#set to 0 to run all medium & large tests in a single maven operation
|
#set to 0 to run all medium & large tests in a single maven operation
|
||||||
# instead of two
|
# instead of two
|
||||||
|
@ -85,10 +85,10 @@ mvnCommand="mvn "
|
||||||
function createListDeadProcess {
|
function createListDeadProcess {
|
||||||
id=$$
|
id=$$
|
||||||
listDeadProcess=""
|
listDeadProcess=""
|
||||||
|
|
||||||
#list of the process with a ppid of 1
|
#list of the process with a ppid of 1
|
||||||
sonProcess=`ps -o pid= --ppid 1`
|
sonProcess=`ps -o pid= --ppid 1`
|
||||||
|
|
||||||
#then the process with a pgid of the script
|
#then the process with a pgid of the script
|
||||||
for pId in $sonProcess
|
for pId in $sonProcess
|
||||||
do
|
do
|
||||||
|
@ -119,32 +119,32 @@ function cleanProcess {
|
||||||
jstack -F -l $pId
|
jstack -F -l $pId
|
||||||
kill $pId
|
kill $pId
|
||||||
echo "kill sent, waiting for 30 seconds"
|
echo "kill sent, waiting for 30 seconds"
|
||||||
sleep 30
|
sleep 30
|
||||||
son=`ps -o pid= --pid $pId | wc -l`
|
son=`ps -o pid= --pid $pId | wc -l`
|
||||||
if (test $son -gt 0)
|
if (test $son -gt 0)
|
||||||
then
|
then
|
||||||
echo "$pId, java sub process of $id, is still running after a standard kill, using kill -9 now"
|
echo "$pId, java sub process of $id, is still running after a standard kill, using kill -9 now"
|
||||||
echo "Stack for $pId before kill -9:"
|
echo "Stack for $pId before kill -9:"
|
||||||
jstack -F -l $pId
|
jstack -F -l $pId
|
||||||
kill -9 $pId
|
kill -9 $pId
|
||||||
echo "kill sent, waiting for 2 seconds"
|
echo "kill sent, waiting for 2 seconds"
|
||||||
sleep 2
|
sleep 2
|
||||||
echo "Process $pId killed by kill -9"
|
echo "Process $pId killed by kill -9"
|
||||||
else
|
else
|
||||||
echo "Process $pId killed by standard kill -15"
|
echo "Process $pId killed by standard kill -15"
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
echo "$pId is not a java process (it's $name), I don't kill it."
|
echo "$pId is not a java process (it's $name), I don't kill it."
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
createListDeadProcess
|
createListDeadProcess
|
||||||
if (test ${#listDeadProcess} -gt 0)
|
if (test ${#listDeadProcess} -gt 0)
|
||||||
then
|
then
|
||||||
echo "There are still $sonProcess for process $id left."
|
echo "There are still $sonProcess for process $id left."
|
||||||
else
|
else
|
||||||
echo "Process $id clean, no son process left"
|
echo "Process $id clean, no son process left"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
#count the number of ',' in a string
|
#count the number of ',' in a string
|
||||||
|
@ -155,7 +155,7 @@ function countClasses {
|
||||||
count=$((cars - 1))
|
count=$((cars - 1))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
######################################### script
|
######################################### script
|
||||||
echo "Starting Script. Possible parameters are: runAllTests, replayFailed, nonParallelMaven"
|
echo "Starting Script. Possible parameters are: runAllTests, replayFailed, nonParallelMaven"
|
||||||
echo "Other parameters are sent to maven"
|
echo "Other parameters are sent to maven"
|
||||||
|
@ -177,11 +177,11 @@ do
|
||||||
if [ $arg == "nonParallelMaven" ]
|
if [ $arg == "nonParallelMaven" ]
|
||||||
then
|
then
|
||||||
parallelMaven=0
|
parallelMaven=0
|
||||||
else
|
else
|
||||||
args=$args" $arg"
|
args=$args" $arg"
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
|
|
||||||
|
@ -195,24 +195,24 @@ for testFile in $testsList
|
||||||
do
|
do
|
||||||
lenPath=$((${#rootTestClassDirectory}))
|
lenPath=$((${#rootTestClassDirectory}))
|
||||||
len=$((${#testFile} - $lenPath - 5)) # len(".java") == 5
|
len=$((${#testFile} - $lenPath - 5)) # len(".java") == 5
|
||||||
|
|
||||||
shortTestFile=${testFile:lenPath:$len}
|
shortTestFile=${testFile:lenPath:$len}
|
||||||
testName=$(echo $shortTestFile | sed 's/\//\./g')
|
testName=$(echo $shortTestFile | sed 's/\//\./g')
|
||||||
|
|
||||||
#The ',' is used in the grep pattern as we don't want to catch
|
#The ',' is used in the grep pattern as we don't want to catch
|
||||||
# partial name
|
# partial name
|
||||||
isFlaky=$((`echo $flakyTests | grep "$testName," | wc -l`))
|
isFlaky=$((`echo $flakyTests | grep "$testName," | wc -l`))
|
||||||
|
|
||||||
if (test $isFlaky -eq 0)
|
if (test $isFlaky -eq 0)
|
||||||
then
|
then
|
||||||
isSmall=0
|
isSmall=0
|
||||||
isMedium=0
|
isMedium=0
|
||||||
isLarge=0
|
isLarge=0
|
||||||
|
|
||||||
# determine the category of the test by greping into the source code
|
# determine the category of the test by greping into the source code
|
||||||
isMedium=`grep "@Category" $testFile | grep "MediumTests.class" | wc -l`
|
isMedium=`grep "@Category" $testFile | grep "MediumTests.class" | wc -l`
|
||||||
if (test $isMedium -eq 0)
|
if (test $isMedium -eq 0)
|
||||||
then
|
then
|
||||||
isLarge=`grep "@Category" $testFile | grep "LargeTests.class" | wc -l`
|
isLarge=`grep "@Category" $testFile | grep "LargeTests.class" | wc -l`
|
||||||
if (test $isLarge -eq 0)
|
if (test $isLarge -eq 0)
|
||||||
then
|
then
|
||||||
|
@ -230,22 +230,22 @@ do
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
#put the test in the right list
|
#put the test in the right list
|
||||||
if (test $isSmall -gt 0)
|
if (test $isSmall -gt 0)
|
||||||
then
|
then
|
||||||
smallList="$smallList,$testName"
|
smallList="$smallList,$testName"
|
||||||
fi
|
fi
|
||||||
if (test $isMedium -gt 0)
|
if (test $isMedium -gt 0)
|
||||||
then
|
then
|
||||||
mediumList="$mediumList,$testName"
|
mediumList="$mediumList,$testName"
|
||||||
fi
|
fi
|
||||||
if (test $isLarge -gt 0)
|
if (test $isLarge -gt 0)
|
||||||
then
|
then
|
||||||
largeList="$largeList,$testName"
|
largeList="$largeList,$testName"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
#remove the ',' at the beginning
|
#remove the ',' at the beginning
|
||||||
|
@ -285,7 +285,7 @@ do
|
||||||
nextList=2
|
nextList=2
|
||||||
runList1=$runList1,$testClass
|
runList1=$runList1,$testClass
|
||||||
else
|
else
|
||||||
nextList=1
|
nextList=1
|
||||||
runList2=$runList2,$testClass
|
runList2=$runList2,$testClass
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
@@ -297,27 +297,27 @@ runList2=${runList2:1:${#runList2}}
#now we can run the tests, at last!

echo "Running small tests with one maven instance, in parallel"
#echo Small tests are $smallList
$mvnCommand -P singleJVMTests test -Dtest=$smallList $args
cleanProcess

exeTime=$(((`date +%s` - $startTime)/60))
echo "Small tests executed after $exeTime minutes"

if (test $parallelMaven -gt 0)
then
echo "Running tests with two maven instances in parallel"
$mvnCommand -P localTests test -Dtest=$runList1 $args &

#give some time to the fist process if there is anything to compile
sleep 30
$mvnCommand -P localTests test -Dtest=$runList2 $args

#wait for forked process to finish
wait

cleanProcess

exeTime=$(((`date +%s` - $startTime)/60))
echo "Medium and large (if selected) tests executed after $exeTime minutes"
@@ -329,14 +329,14 @@ then
$mvnCommand -P localTests test -Dtest=$flakyTests $args
cleanProcess
exeTime=$(((`date +%s` - $startTime)/60))
echo "Flaky tests executed after $exeTime minutes"
fi
else
echo "Running tests with a single maven instance, no parallelization"
$mvnCommand -P localTests test -Dtest=$runList1,$runList2,$flakyTests $args
cleanProcess
exeTime=$(((`date +%s` - $startTime)/60))
echo "Single maven instance tests executed after $exeTime minutes"
fi

#let's analyze the results
@@ -360,7 +360,7 @@ for testClass in `echo $fullRunList | sed 's/,/ /g'`
do
reportFile=$surefireReportDirectory/$testClass.txt
outputReportFile=$surefireReportDirectory/$testClass-output.txt

if [ -s $reportFile ];
then
isError=`grep FAILURE $reportFile | wc -l`
@@ -368,22 +368,22 @@ do
then
errorList="$errorList,$testClass"
errorCounter=$(($errorCounter + 1))

#let's copy the files if we want to use it later
cp $reportFile "$surefireReportDirectory/fail_$timestamp.$testClass.txt"
if [ -s $reportFile ];
then
cp $outputReportFile "$surefireReportDirectory/fail_$timestamp.$testClass"-output.txt""
fi
else

sucessCounter=$(($sucessCounter +1))
fi
else
#report file does not exist or is empty => the test didn't finish
notFinishedCounter=$(($notFinishedCounter + 1))
notFinishedList="$notFinishedList,$testClass"
fi
done

#list of all tests that failed
@@ -411,7 +411,7 @@ echo
echo "Tests in error are: $errorPresList"
echo "Tests that didn't finish are: $notFinishedPresList"
echo
echo "Execution time in minutes: $exeTime"
echo "##########################"
@@ -33,4 +33,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:
export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}"

ulimit -n
@@ -17,11 +17,11 @@
# specific language governing permissions and limitations
# under the License.

# This script assumes that your remote is called "origin"
# and that your local master branch is called "master".
# I am sure it could be made more abstract but these are the defaults.

# Edit this line to point to your default directory,
# or always pass a directory to the script.

DEFAULT_DIR="EDIT_ME"
@@ -69,13 +69,13 @@ function check_git_branch_status {
}

function get_jira_status {
# This function expects as an argument the JIRA ID,
# and returns 99 if resolved and 1 if it couldn't
# get the status.

# The JIRA status looks like this in the HTML:
# span id="resolution-val" class="value resolved" >
# The following is a bit brittle, but filters for lines with
# resolution-val returns 99 if it's resolved
jira_url='https://issues.apache.org/jira/rest/api/2/issue'
jira_id="$1"
@@ -106,7 +106,7 @@ while getopts ":hd:" opt; do
print_usage
exit 0
;;
*)
echo "Invalid argument: $OPTARG" >&2
print_usage >&2
exit 1
@@ -135,7 +135,7 @@ get_tracking_branches
for i in "${tracking_branches[@]}"; do
git checkout -q "$i"
# Exit if git status is dirty
check_git_branch_status
git pull -q --rebase
status=$?
if [ "$status" -ne 0 ]; then
@@ -169,7 +169,7 @@ for i in "${all_branches[@]}"; do
git checkout -q "$i"

# Exit if git status is dirty
check_git_branch_status

# If this branch has a remote, don't rebase it
# If it has a remote, it has a log with at least one entry
@@ -184,7 +184,7 @@ for i in "${all_branches[@]}"; do
echo "Failed. Rolling back. Rebase $i manually."
git rebase --abort
fi
elif [ $status -ne 0 ]; then
# If status is 0 it means there is a remote branch, we already took care of it
echo "Unknown error: $?" >&2
exit 1
@@ -195,10 +195,10 @@ done
for i in "${deleted_branches[@]}"; do
read -p "$i's JIRA is resolved. Delete? " yn
case $yn in
[Yy])
git branch -D $i
;;
*)
echo "To delete it manually, run git branch -D $deleted_branches"
;;
esac
@@ -52,7 +52,7 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
# correct place to put those files.

# NOTE 2014/07/17:
# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash
# causing below checks to fail. Once it is fixed, we can revert the commit and enable this again.

# TMP2=/tmp/tmp.paths.2.$$
@@ -32,7 +32,7 @@ options:
-h Show this message
-c Run 'mvn clean' before running the tests
-f FILE Run the additional tests listed in the FILE
-u Only run unit tests. Default is to run
unit and integration tests
-n N Run each test N times. Default = 1.
-s N Print N slowest tests
@@ -92,7 +92,7 @@ do
r)
server=1
;;
?)
usage
exit 1
esac
@@ -175,7 +175,7 @@ done

# Print a report of the slowest running tests
if [ ! -z $showSlowest ]; then

testNameIdx=0
for (( i = 0; i < ${#test[@]}; i++ ))
do
@@ -29,7 +29,7 @@
#set -x
# printenv

### Setup some variables.
bindir=$(dirname $0)

# This key is set by our surefire configuration up in the main pom.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0"?>
+<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
@@ -21,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
-<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
+<artifactId>hbase</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to the client. This tests the hbase-client package and all of the client
* tests in hbase-server.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to coprocessors.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as failing commonly on public build infrastructure.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and
* the like.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,23 +15,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as 'integration/system' test, meaning that the test class has the following
* characteristics:
* <ul>
-* <li> Possibly takes hours to complete</li>
-* <li> Can be run on a mini cluster or an actual cluster</li>
-* <li> Can make changes to the given cluster (starting stopping daemons, etc)</li>
-* <li> Should not be run in parallel of other integration tests</li>
+* <li>Possibly takes hours to complete</li>
+* <li>Can be run on a mini cluster or an actual cluster</li>
+* <li>Can make changes to the given cluster (starting stopping daemons, etc)</li>
+* <li>Should not be run in parallel of other integration tests</li>
* </ul>
-*
-* Integration / System tests should have a class name starting with "IntegrationTest", and
-* should be annotated with @Category(IntegrationTests.class). Integration tests can be run
-* using the IntegrationTestsDriver class or from mvn verify.
-*
+* Integration / System tests should have a class name starting with "IntegrationTest", and should
+* be annotated with @Category(IntegrationTests.class). Integration tests can be run using the
+* IntegrationTestsDriver class or from mvn verify.
* @see SmallTests
* @see MediumTests
* @see LargeTests
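(For illustration only, not taken from the repository: per the javadoc above, an integration test is recognized by its "IntegrationTest" class-name prefix and its category annotation. A hypothetical skeleton is sketched below; the package, class name and test body are assumptions.)

package org.apache.hadoop.hbase.example; // hypothetical package, for illustration only

import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

// Class name starts with "IntegrationTest" so the IntegrationTestsDriver / mvn verify
// machinery described above can discover it; the category marks it as integration/system.
@Category(IntegrationTests.class)
public class IntegrationTestExampleLoad {

  @Test
  public void clusterSurvivesLoad() {
    // A real integration test would drive a mini cluster or a live cluster here,
    // possibly for hours; left empty because this only illustrates the tagging.
  }
}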
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,21 +15,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tagging a test as 'large', means that the test class has the following characteristics:
* <ul>
* <li>it can executed in an isolated JVM (Tests can however be executed in different JVM on the
-* same machine simultaneously so be careful two concurrent tests end up fighting over ports
-* or other singular resources).</li>
-* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it
-* has, will run in last less than three minutes</li>
+* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+* other singular resources).</li>
+* <li>ideally, the whole large test-suite/class, no matter how many or how few test methods it has,
+* will run in last less than three minutes</li>
* <li>No large test can take longer than ten minutes; it will be killed. See 'Integeration Tests'
* if you need to run tests longer than this.</li>
* </ul>
-*
* @see SmallTests
* @see MediumTests
* @see IntegrationTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to mapred or mapreduce.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to the master.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,21 +15,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tagging a test as 'medium' means that the test class has the following characteristics:
* <ul>
-* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on
-* the same machine simultaneously so be careful two concurrent tests end up fighting over ports
-* or other singular resources).</li>
+* <li>it can be executed in an isolated JVM (Tests can however be executed in different JVMs on the
+* same machine simultaneously so be careful two concurrent tests end up fighting over ports or
+* other singular resources).</li>
* <li>ideally, the whole medium test-suite/class, no matter how many or how few test methods it
* has, will complete in 50 seconds; otherwise make it a 'large' test.</li>
* </ul>
-*
* Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster.
-*
* @see SmallTests
* @see LargeTests
* @see IntegrationTests
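(For illustration only, not taken from the repository: the "start up a cluster" case mentioned above usually means an in-process mini cluster wrapped around the test methods. A rough sketch follows; the mini-cluster utility has been named HBaseTestingUtility on older branches and HBaseTestingUtil on newer ones, so treat the exact class name, package and test body as assumptions.)

package org.apache.hadoop.hbase.example; // hypothetical package, for illustration only

import org.apache.hadoop.hbase.HBaseTestingUtility; // name varies by branch, see note above
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestWithMiniCluster {

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    UTIL.startMiniCluster(); // the whole class should still fit the 50-second budget above
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void worksAgainstCluster() throws Exception {
    // Exercise the cluster through UTIL.getConnection() here.
  }
}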
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as not easily falling into any of the below categories.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to RPC.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to the regionserver.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to replication.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to the REST capability of HBase.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,12 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to security.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.testclassification;
/**
* Tagging a test as 'small' means that the test class has the following characteristics:
* <ul>
* <li>it can be run simultaneously with other small tests all in the same JVM</li>
-* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test
-* methods it has, should take less than 15 seconds to complete</li>
+* <li>ideally, the WHOLE implementing test-suite/class, no matter how many or how few test methods
+* it has, should take less than 15 seconds to complete</li>
* <li>it does not use a cluster</li>
* </ul>
-*
* @see MediumTests
* @see LargeTests
* @see IntegrationTests
*/
-public interface SmallTests {}
+public interface SmallTests {
+}
@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build
* infrastructure.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,13 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
* Tag a test as region tests which takes longer than 5 minutes to run on public build
* infrastructure.
-*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
* @see org.apache.hadoop.hbase.testclassification.FilterTests
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.testclassification;

/**
@@ -1,6 +1,5 @@
-<?xml version="1.0"?>
-<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@@ -23,8 +22,8 @@
<modelVersion>4.0.0</modelVersion>

<parent>
-<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
+<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
@@ -58,10 +57,10 @@
further using xml-maven-plugin for xslt transformation, below. -->
<execution>
<id>hbase-client__copy-src-to-build-archetype-subdir</id>
-<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
+<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir}</outputDirectory>
<resources>
@@ -76,29 +75,30 @@
</execution>
<execution>
<id>hbase-client__copy-pom-to-temp-for-xslt-processing</id>
-<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
+<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir}</outputDirectory>
<resources>
<resource>
<directory>/${project.basedir}/../${hbase-client.dir}</directory>
-<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
+<filtering>true</filtering>
+<!-- filtering replaces ${project.version} with literal -->
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
<execution>
<id>hbase-shaded-client__copy-src-to-build-archetype-subdir</id>
-<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
+<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir}</outputDirectory>
<resources>
@@ -113,20 +113,21 @@
</execution>
<execution>
<id>hbase-shaded-client__copy-pom-to-temp-for-xslt-processing</id>
-<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
+<phase>generate-resources</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir}</outputDirectory>
<resources>
<resource>
<directory>/${project.basedir}/../${hbase-shaded-client.dir}</directory>
-<filtering>true</filtering> <!-- filtering replaces ${project.version} with literal -->
+<filtering>true</filtering>
+<!-- filtering replaces ${project.version} with literal -->
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
@@ -137,10 +138,10 @@
using xml-maven-plugin for xslt transformation, below. -->
<execution>
<id>hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
-<phase>prepare-package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
+<phase>prepare-package</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir}</outputDirectory>
<resources>
@@ -149,16 +150,16 @@
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
<execution>
<id>hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing</id>
-<phase>prepare-package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
+<phase>prepare-package</phase>
<configuration>
<outputDirectory>/${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir}</outputDirectory>
<resources>
@@ -167,7 +168,7 @@
<includes>
<include>pom.xml</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
@@ -182,10 +183,10 @@
<!-- xml-maven-plugin modifies each exemplar project's pom.xml file to convert to standalone project. -->
<execution>
<id>modify-exemplar-pom-files-via-xslt</id>
-<phase>process-resources</phase>
<goals>
<goal>transform</goal>
</goals>
+<phase>process-resources</phase>
<configuration>
<transformationSets>
<transformationSet>
@@ -212,10 +213,10 @@
prevent warnings when project is generated from archetype. -->
<execution>
<id>modify-archetype-pom-files-via-xslt</id>
-<phase>package</phase>
<goals>
<goal>transform</goal>
</goals>
+<phase>package</phase>
<configuration>
<transformationSets>
<transformationSet>
@@ -242,32 +243,32 @@
</plugin>

<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<!-- exec-maven-plugin executes chmod to make scripts executable -->
<execution>
<id>make-scripts-executable</id>
-<phase>process-resources</phase>
<goals>
<goal>run</goal>
</goals>
+<phase>process-resources</phase>
<configuration>
-<chmod file="${project.basedir}/createArchetypes.sh" perm="+x" />
-<chmod file="${project.basedir}/installArchetypes.sh" perm="+x" />
+<chmod file="${project.basedir}/createArchetypes.sh" perm="+x"/>
+<chmod file="${project.basedir}/installArchetypes.sh" perm="+x"/>
</configuration>
</execution>
<!-- exec-maven-plugin executes script which invokes 'archetype:create-from-project'
to derive archetypes from exemplar projects. -->
<execution>
<id>run-createArchetypes-script</id>
-<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
+<phase>compile</phase>
<configuration>
-<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
+<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
<arg line="./createArchetypes.sh"/>
</exec>
</configuration>
</execution>
<!-- exec-maven-plugin executes script which invokes 'install' to install each
@@ -277,14 +278,14 @@
which does test generation of a project based on the archetype. -->
<execution>
<id>run-installArchetypes-script</id>
-<phase>install</phase>
<goals>
<goal>run</goal>
</goals>
+<phase>install</phase>
<configuration>
-<exec executable="${shell-executable}" dir="${project.basedir}" failonerror="true">
+<exec dir="${project.basedir}" executable="${shell-executable}" failonerror="true">
<arg line="./installArchetypes.sh"/>
</exec>
</configuration>
</execution>
</executions>
@@ -1,8 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="https://maven.apache.org/POM/4.0.0"
-xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation=
-"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
@@ -24,8 +21,8 @@
-->
<modelVersion>4.0.0</modelVersion>
<parent>
-<artifactId>hbase-archetypes</artifactId>
<groupId>org.apache.hbase</groupId>
+<artifactId>hbase-archetypes</artifactId>
<version>3.0.0-alpha-3-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
@@ -1,5 +1,4 @@
-/**
-*
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

/**
-* Successful running of this application requires access to an active instance
-* of HBase. For install instructions for a standalone instance of HBase, please
-* refer to https://hbase.apache.org/book.html#quickstart
+* Successful running of this application requires access to an active instance of HBase. For
+* install instructions for a standalone instance of HBase, please refer to
+* https://hbase.apache.org/book.html#quickstart
*/
public final class HelloHBase {

protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
-static final byte[] MY_FIRST_COLUMN_QUALIFIER
-= Bytes.toBytes("myFirstColumn");
-static final byte[] MY_SECOND_COLUMN_QUALIFIER
-= Bytes.toBytes("mySecondColumn");
+static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
+static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");

// Private constructor included here to avoid checkstyle warnings
@@ -61,20 +58,20 @@ public final class HelloHBase {
final boolean deleteAllAtEOJ = true;

/**
-* ConnectionFactory#createConnection() automatically looks for
-* hbase-site.xml (HBase configuration parameters) on the system's
-* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
+* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
+* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
+* HBase via ZooKeeper.
*/
try (Connection connection = ConnectionFactory.createConnection();
Admin admin = connection.getAdmin()) {
admin.getClusterMetrics(); // assure connection successfully established
-System.out.println("\n*** Hello HBase! -- Connection has been "
-+ "established via ZooKeeper!!\n");
+System.out
+.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");

createNamespaceAndTable(admin);

System.out.println("Getting a Table object for [" + MY_TABLE_NAME
+ "] with which to perform CRUD operations in HBase.");
try (Table table = connection.getTable(MY_TABLE_NAME)) {

putRowToTable(table);
@@ -92,9 +89,8 @@ public final class HelloHBase {
}

/**
-* Invokes Admin#createNamespace and Admin#createTable to create a namespace
-* with a table that has one column-family.
-*
+* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
+* one column-family.
* @param admin Standard Admin object
* @throws IOException If IO problem encountered
*/
@@ -103,48 +99,38 @@ public final class HelloHBase {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");

-admin.createNamespace(NamespaceDescriptor
-.create(MY_NAMESPACE_NAME).build());
+admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
-+ "], with one Column Family ["
-+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
++ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
-.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME))
-.build();
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build();
admin.createTable(desc);
}
}

/**
-* Invokes Table#put to store a row (with two new columns created 'on the
-* fly') into the table.
-*
+* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
* @param table Standard Table object (used for CRUD operations).
* @throws IOException If IO problem encountered
*/
static void putRowToTable(final Table table) throws IOException {

-table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
-MY_FIRST_COLUMN_QUALIFIER,
-Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
-MY_SECOND_COLUMN_QUALIFIER,
-Bytes.toBytes("World!")));
+table.put(new Put(MY_ROW_ID)
+.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
+.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));

-System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
-+ "] was put into Table ["
-+ table.getName().getNameAsString() + "] in HBase;\n"
-+ " the row's two columns (created 'on the fly') are: ["
-+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
-+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
-+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
-+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
+System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
++ table.getName().getNameAsString() + "] in HBase;\n"
++ " the row's two columns (created 'on the fly') are: ["
++ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
++ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
++ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
}

/**
* Invokes Table#get and prints out the contents of the retrieved row.
-*
* @param table Standard Table object
* @throws IOException If IO problem encountered
*/
@ -152,38 +138,32 @@ public final class HelloHBase {
|
||||||
|
|
||||||
Result row = table.get(new Get(MY_ROW_ID));
|
Result row = table.get(new Get(MY_ROW_ID));
|
||||||
|
|
||||||
System.out.println("Row [" + Bytes.toString(row.getRow())
|
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
|
||||||
+ "] was retrieved from Table ["
|
+ table.getName().getNameAsString() + "] in HBase, with the following content:");
|
||||||
+ table.getName().getNameAsString()
|
|
||||||
+ "] in HBase, with the following content:");
|
|
||||||
|
|
||||||
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
|
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
|
||||||
: row.getNoVersionMap().entrySet()) {
|
.entrySet()) {
|
||||||
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
|
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
|
||||||
|
|
||||||
System.out.println(" Columns in Column Family [" + columnFamilyName
|
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
|
||||||
+ "]:");
|
|
||||||
|
|
||||||
for (Entry<byte[], byte[]> columnNameAndValueMap
|
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
|
||||||
: colFamilyEntry.getValue().entrySet()) {
|
|
||||||
|
|
||||||
System.out.println(" Value of Column [" + columnFamilyName + ":"
|
System.out.println(" Value of Column [" + columnFamilyName + ":"
|
||||||
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
|
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
|
||||||
+ Bytes.toString(columnNameAndValueMap.getValue()));
|
+ Bytes.toString(columnNameAndValueMap.getValue()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Checks to see whether a namespace exists.
|
* Checks to see whether a namespace exists.
|
||||||
*
|
* @param admin Standard Admin object
|
||||||
* @param admin Standard Admin object
|
|
||||||
* @param namespaceName Name of namespace
|
* @param namespaceName Name of namespace
|
||||||
* @return true If namespace exists
|
* @return true If namespace exists
|
||||||
* @throws IOException If IO problem encountered
|
* @throws IOException If IO problem encountered
|
||||||
*/
|
*/
|
||||||
static boolean namespaceExists(final Admin admin, final String namespaceName)
|
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
|
||||||
throws IOException {
|
|
||||||
try {
|
try {
|
||||||
admin.getNamespaceDescriptor(namespaceName);
|
admin.getNamespaceDescriptor(namespaceName);
|
||||||
} catch (NamespaceNotFoundException e) {
|
} catch (NamespaceNotFoundException e) {
|
||||||
|
@ -194,28 +174,24 @@ public final class HelloHBase {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Table#delete to delete test data (i.e. the row)
|
* Invokes Table#delete to delete test data (i.e. the row)
|
||||||
*
|
|
||||||
* @param table Standard Table object
|
* @param table Standard Table object
|
||||||
* @throws IOException If IO problem is encountered
|
* @throws IOException If IO problem is encountered
|
||||||
*/
|
*/
|
||||||
static void deleteRow(final Table table) throws IOException {
|
static void deleteRow(final Table table) throws IOException {
|
||||||
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
|
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
|
||||||
+ "] from Table ["
|
+ table.getName().getNameAsString() + "].");
|
||||||
+ table.getName().getNameAsString() + "].");
|
|
||||||
table.delete(new Delete(MY_ROW_ID));
|
table.delete(new Delete(MY_ROW_ID));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
|
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
|
||||||
* disable/delete Table and delete Namespace.
|
* Table and delete Namespace.
|
||||||
*
|
|
||||||
* @param admin Standard Admin object
|
* @param admin Standard Admin object
|
||||||
* @throws IOException If IO problem is encountered
|
* @throws IOException If IO problem is encountered
|
||||||
*/
|
*/
|
||||||
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
|
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
|
||||||
if (admin.tableExists(MY_TABLE_NAME)) {
|
if (admin.tableExists(MY_TABLE_NAME)) {
|
||||||
System.out.println("Disabling/deleting Table ["
|
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
|
||||||
+ MY_TABLE_NAME.getNameAsString() + "].");
|
|
||||||
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
|
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
|
||||||
admin.deleteTable(MY_TABLE_NAME);
|
admin.deleteTable(MY_TABLE_NAME);
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
/**
|
/*
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* or more contributor license agreements. See the NOTICE file
|
||||||
* distributed with this work for additional information
|
* distributed with this work for additional information
|
||||||
|
@ -44,10 +44,9 @@ public class TestHelloHBase {
|
||||||
|
|
||||||
@ClassRule
|
@ClassRule
|
||||||
public static final HBaseClassTestRule CLASS_RULE =
|
public static final HBaseClassTestRule CLASS_RULE =
|
||||||
HBaseClassTestRule.forClass(TestHelloHBase.class);
|
HBaseClassTestRule.forClass(TestHelloHBase.class);
|
||||||
|
|
||||||
private static final HBaseTestingUtil TEST_UTIL
|
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
|
||||||
= new HBaseTestingUtil();
|
|
||||||
|
|
||||||
@BeforeClass
|
@BeforeClass
|
||||||
public static void beforeClass() throws Exception {
|
public static void beforeClass() throws Exception {
|
||||||
|
@ -67,13 +66,11 @@ public class TestHelloHBase {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
|
|
||||||
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
|
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
|
||||||
assertEquals("#namespaceExists failed: found nonexistent namespace.",
|
assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
|
||||||
false, exists);
|
|
||||||
|
|
||||||
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
|
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
|
||||||
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
|
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
|
||||||
assertEquals("#namespaceExists failed: did NOT find existing namespace.",
|
assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
|
||||||
true, exists);
|
|
||||||
admin.deleteNamespace(EXISTING_NAMESPACE);
|
admin.deleteNamespace(EXISTING_NAMESPACE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -82,14 +79,11 @@ public class TestHelloHBase {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
HelloHBase.createNamespaceAndTable(admin);
|
HelloHBase.createNamespaceAndTable(admin);
|
||||||
|
|
||||||
boolean namespaceExists
|
boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
|
||||||
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
|
assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
|
||||||
assertEquals("#createNamespaceAndTable failed to create namespace.",
|
|
||||||
true, namespaceExists);
|
|
||||||
|
|
||||||
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
|
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
|
||||||
assertEquals("#createNamespaceAndTable failed to create table.",
|
assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
|
||||||
true, tableExists);
|
|
||||||
|
|
||||||
admin.disableTable(HelloHBase.MY_TABLE_NAME);
|
admin.disableTable(HelloHBase.MY_TABLE_NAME);
|
||||||
admin.deleteTable(HelloHBase.MY_TABLE_NAME);
|
admin.deleteTable(HelloHBase.MY_TABLE_NAME);
|
||||||
|
@ -100,8 +94,7 @@ public class TestHelloHBase {
|
||||||
public void testPutRowToTable() throws IOException {
|
public void testPutRowToTable() throws IOException {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
||||||
Table table
|
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
||||||
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
|
||||||
|
|
||||||
HelloHBase.putRowToTable(table);
|
HelloHBase.putRowToTable(table);
|
||||||
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
||||||
|
@ -115,13 +108,10 @@ public class TestHelloHBase {
|
||||||
public void testDeleteRow() throws IOException {
|
public void testDeleteRow() throws IOException {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
||||||
Table table
|
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
||||||
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
|
||||||
|
|
||||||
table.put(new Put(HelloHBase.MY_ROW_ID).
|
table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
|
||||||
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
|
HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
|
||||||
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
|
|
||||||
Bytes.toBytes("xyz")));
|
|
||||||
HelloHBase.deleteRow(table);
|
HelloHBase.deleteRow(table);
|
||||||
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
||||||
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());
|
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());
|
||||||
|
|
|
@ -1,8 +1,5 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<project xmlns="https://maven.apache.org/POM/4.0.0"
|
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
|
|
||||||
xsi:schemaLocation=
|
|
||||||
"https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|
||||||
<!--
|
<!--
|
||||||
/**
|
/**
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
@ -24,8 +21,8 @@
|
||||||
-->
|
-->
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<artifactId>hbase-archetypes</artifactId>
|
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-archetypes</artifactId>
|
||||||
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
||||||
<relativePath>..</relativePath>
|
<relativePath>..</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
@ -44,16 +41,16 @@
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-testing-util</artifactId>
|
<artifactId>hbase-testing-util</artifactId>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>javax.xml.bind</groupId>
|
<groupId>javax.xml.bind</groupId>
|
||||||
<artifactId>jaxb-api</artifactId>
|
<artifactId>jaxb-api</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>javax.ws.rs</groupId>
|
<groupId>javax.ws.rs</groupId>
|
||||||
<artifactId>jsr311-api</artifactId>
|
<artifactId>jsr311-api</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
/**
|
/*
|
||||||
*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* or more contributor license agreements. See the NOTICE file
|
||||||
* distributed with this work for additional information
|
* distributed with this work for additional information
|
||||||
|
@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Successful running of this application requires access to an active instance
|
* Successful running of this application requires access to an active instance of HBase. For
|
||||||
* of HBase. For install instructions for a standalone instance of HBase, please
|
* install instructions for a standalone instance of HBase, please refer to
|
||||||
* refer to https://hbase.apache.org/book.html#quickstart
|
* https://hbase.apache.org/book.html#quickstart
|
||||||
*/
|
*/
|
||||||
public final class HelloHBase {
|
public final class HelloHBase {
|
||||||
|
|
||||||
protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
|
protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
|
||||||
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
|
static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
|
||||||
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
|
static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
|
||||||
static final byte[] MY_FIRST_COLUMN_QUALIFIER
|
static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
|
||||||
= Bytes.toBytes("myFirstColumn");
|
static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn");
|
||||||
static final byte[] MY_SECOND_COLUMN_QUALIFIER
|
|
||||||
= Bytes.toBytes("mySecondColumn");
|
|
||||||
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
|
static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01");
|
||||||
|
|
||||||
// Private constructor included here to avoid checkstyle warnings
|
// Private constructor included here to avoid checkstyle warnings
|
||||||
|
@ -60,20 +57,20 @@ public final class HelloHBase {
|
||||||
final boolean deleteAllAtEOJ = true;
|
final boolean deleteAllAtEOJ = true;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ConnectionFactory#createConnection() automatically looks for
|
* ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase
|
||||||
* hbase-site.xml (HBase configuration parameters) on the system's
|
* configuration parameters) on the system's CLASSPATH, to enable creation of Connection to
|
||||||
* CLASSPATH, to enable creation of Connection to HBase via ZooKeeper.
|
* HBase via ZooKeeper.
|
||||||
*/
|
*/
|
||||||
try (Connection connection = ConnectionFactory.createConnection();
|
try (Connection connection = ConnectionFactory.createConnection();
|
||||||
Admin admin = connection.getAdmin()) {
|
Admin admin = connection.getAdmin()) {
|
||||||
admin.getClusterMetrics(); // assure connection successfully established
|
admin.getClusterMetrics(); // assure connection successfully established
|
||||||
System.out.println("\n*** Hello HBase! -- Connection has been "
|
System.out
|
||||||
+ "established via ZooKeeper!!\n");
|
.println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n");
|
||||||
|
|
||||||
createNamespaceAndTable(admin);
|
createNamespaceAndTable(admin);
|
||||||
|
|
||||||
System.out.println("Getting a Table object for [" + MY_TABLE_NAME
|
System.out.println("Getting a Table object for [" + MY_TABLE_NAME
|
||||||
+ "] with which to perform CRUD operations in HBase.");
|
+ "] with which to perform CRUD operations in HBase.");
|
||||||
try (Table table = connection.getTable(MY_TABLE_NAME)) {
|
try (Table table = connection.getTable(MY_TABLE_NAME)) {
|
||||||
|
|
||||||
putRowToTable(table);
|
putRowToTable(table);
|
||||||
|
@ -91,9 +88,8 @@ public final class HelloHBase {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Admin#createNamespace and Admin#createTable to create a namespace
|
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
|
||||||
* with a table that has one column-family.
|
* one column-family.
|
||||||
*
|
|
||||||
* @param admin Standard Admin object
|
* @param admin Standard Admin object
|
||||||
* @throws IOException If IO problem encountered
|
* @throws IOException If IO problem encountered
|
||||||
*/
|
*/
|
||||||
|
@ -102,13 +98,11 @@ public final class HelloHBase {
|
||||||
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
|
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
|
||||||
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
|
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
|
||||||
|
|
||||||
admin.createNamespace(NamespaceDescriptor
|
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
|
||||||
.create(MY_NAMESPACE_NAME).build());
|
|
||||||
}
|
}
|
||||||
if (!admin.tableExists(MY_TABLE_NAME)) {
|
if (!admin.tableExists(MY_TABLE_NAME)) {
|
||||||
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
|
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
|
||||||
+ "], with one Column Family ["
|
+ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
|
||||||
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
|
|
||||||
|
|
||||||
admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
|
admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
|
||||||
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build());
|
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build());
|
||||||
|
@ -116,33 +110,26 @@ public final class HelloHBase {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Table#put to store a row (with two new columns created 'on the
|
* Invokes Table#put to store a row (with two new columns created 'on the fly') into the table.
|
||||||
* fly') into the table.
|
|
||||||
*
|
|
||||||
* @param table Standard Table object (used for CRUD operations).
|
* @param table Standard Table object (used for CRUD operations).
|
||||||
* @throws IOException If IO problem encountered
|
* @throws IOException If IO problem encountered
|
||||||
*/
|
*/
|
||||||
static void putRowToTable(final Table table) throws IOException {
|
static void putRowToTable(final Table table) throws IOException {
|
||||||
|
|
||||||
table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME,
|
table.put(new Put(MY_ROW_ID)
|
||||||
MY_FIRST_COLUMN_QUALIFIER,
|
.addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello"))
|
||||||
Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME,
|
.addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!")));
|
||||||
MY_SECOND_COLUMN_QUALIFIER,
|
|
||||||
Bytes.toBytes("World!")));
|
|
||||||
|
|
||||||
System.out.println("Row [" + Bytes.toString(MY_ROW_ID)
|
System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table ["
|
||||||
+ "] was put into Table ["
|
+ table.getName().getNameAsString() + "] in HBase;\n"
|
||||||
+ table.getName().getNameAsString() + "] in HBase;\n"
|
+ " the row's two columns (created 'on the fly') are: ["
|
||||||
+ " the row's two columns (created 'on the fly') are: ["
|
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
|
||||||
+ Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
|
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
|
||||||
+ Bytes.toString(MY_FIRST_COLUMN_QUALIFIER)
|
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
|
||||||
+ "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":"
|
|
||||||
+ Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Table#get and prints out the contents of the retrieved row.
|
* Invokes Table#get and prints out the contents of the retrieved row.
|
||||||
*
|
|
||||||
* @param table Standard Table object
|
* @param table Standard Table object
|
||||||
* @throws IOException If IO problem encountered
|
* @throws IOException If IO problem encountered
|
||||||
*/
|
*/
|
||||||
|
@ -150,38 +137,32 @@ public final class HelloHBase {
|
||||||
|
|
||||||
Result row = table.get(new Get(MY_ROW_ID));
|
Result row = table.get(new Get(MY_ROW_ID));
|
||||||
|
|
||||||
System.out.println("Row [" + Bytes.toString(row.getRow())
|
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table ["
|
||||||
+ "] was retrieved from Table ["
|
+ table.getName().getNameAsString() + "] in HBase, with the following content:");
|
||||||
+ table.getName().getNameAsString()
|
|
||||||
+ "] in HBase, with the following content:");
|
|
||||||
|
|
||||||
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry
|
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap()
|
||||||
: row.getNoVersionMap().entrySet()) {
|
.entrySet()) {
|
||||||
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
|
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
|
||||||
|
|
||||||
System.out.println(" Columns in Column Family [" + columnFamilyName
|
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
|
||||||
+ "]:");
|
|
||||||
|
|
||||||
for (Entry<byte[], byte[]> columnNameAndValueMap
|
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
|
||||||
: colFamilyEntry.getValue().entrySet()) {
|
|
||||||
|
|
||||||
System.out.println(" Value of Column [" + columnFamilyName + ":"
|
System.out.println(" Value of Column [" + columnFamilyName + ":"
|
||||||
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
|
+ Bytes.toString(columnNameAndValueMap.getKey()) + "] == "
|
||||||
+ Bytes.toString(columnNameAndValueMap.getValue()));
|
+ Bytes.toString(columnNameAndValueMap.getValue()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Checks to see whether a namespace exists.
|
* Checks to see whether a namespace exists.
|
||||||
*
|
* @param admin Standard Admin object
|
||||||
* @param admin Standard Admin object
|
|
||||||
* @param namespaceName Name of namespace
|
* @param namespaceName Name of namespace
|
||||||
* @return true If namespace exists
|
* @return true If namespace exists
|
||||||
* @throws IOException If IO problem encountered
|
* @throws IOException If IO problem encountered
|
||||||
*/
|
*/
|
||||||
static boolean namespaceExists(final Admin admin, final String namespaceName)
|
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
|
||||||
throws IOException {
|
|
||||||
try {
|
try {
|
||||||
admin.getNamespaceDescriptor(namespaceName);
|
admin.getNamespaceDescriptor(namespaceName);
|
||||||
} catch (NamespaceNotFoundException e) {
|
} catch (NamespaceNotFoundException e) {
|
||||||
|
@ -192,28 +173,24 @@ public final class HelloHBase {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Table#delete to delete test data (i.e. the row)
|
* Invokes Table#delete to delete test data (i.e. the row)
|
||||||
*
|
|
||||||
* @param table Standard Table object
|
* @param table Standard Table object
|
||||||
* @throws IOException If IO problem is encountered
|
* @throws IOException If IO problem is encountered
|
||||||
*/
|
*/
|
||||||
static void deleteRow(final Table table) throws IOException {
|
static void deleteRow(final Table table) throws IOException {
|
||||||
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
|
System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table ["
|
||||||
+ "] from Table ["
|
+ table.getName().getNameAsString() + "].");
|
||||||
+ table.getName().getNameAsString() + "].");
|
|
||||||
table.delete(new Delete(MY_ROW_ID));
|
table.delete(new Delete(MY_ROW_ID));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to
|
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
|
||||||
* disable/delete Table and delete Namespace.
|
* Table and delete Namespace.
|
||||||
*
|
|
||||||
* @param admin Standard Admin object
|
* @param admin Standard Admin object
|
||||||
* @throws IOException If IO problem is encountered
|
* @throws IOException If IO problem is encountered
|
||||||
*/
|
*/
|
||||||
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
|
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
|
||||||
if (admin.tableExists(MY_TABLE_NAME)) {
|
if (admin.tableExists(MY_TABLE_NAME)) {
|
||||||
System.out.println("Disabling/deleting Table ["
|
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
|
||||||
+ MY_TABLE_NAME.getNameAsString() + "].");
|
|
||||||
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
|
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
|
||||||
admin.deleteTable(MY_TABLE_NAME);
|
admin.deleteTable(MY_TABLE_NAME);
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
/**
|
/*
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* or more contributor license agreements. See the NOTICE file
|
||||||
* distributed with this work for additional information
|
* distributed with this work for additional information
|
||||||
|
@ -44,10 +44,9 @@ public class TestHelloHBase {
|
||||||
|
|
||||||
@ClassRule
|
@ClassRule
|
||||||
public static final HBaseClassTestRule CLASS_RULE =
|
public static final HBaseClassTestRule CLASS_RULE =
|
||||||
HBaseClassTestRule.forClass(TestHelloHBase.class);
|
HBaseClassTestRule.forClass(TestHelloHBase.class);
|
||||||
|
|
||||||
private static final HBaseTestingUtil TEST_UTIL
|
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
|
||||||
= new HBaseTestingUtil();
|
|
||||||
|
|
||||||
@BeforeClass
|
@BeforeClass
|
||||||
public static void beforeClass() throws Exception {
|
public static void beforeClass() throws Exception {
|
||||||
|
@ -67,13 +66,11 @@ public class TestHelloHBase {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
|
|
||||||
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
|
exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE);
|
||||||
assertEquals("#namespaceExists failed: found nonexistent namespace.",
|
assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists);
|
||||||
false, exists);
|
|
||||||
|
|
||||||
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
|
admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build());
|
||||||
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
|
exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE);
|
||||||
assertEquals("#namespaceExists failed: did NOT find existing namespace.",
|
assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists);
|
||||||
true, exists);
|
|
||||||
admin.deleteNamespace(EXISTING_NAMESPACE);
|
admin.deleteNamespace(EXISTING_NAMESPACE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -82,14 +79,11 @@ public class TestHelloHBase {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
HelloHBase.createNamespaceAndTable(admin);
|
HelloHBase.createNamespaceAndTable(admin);
|
||||||
|
|
||||||
boolean namespaceExists
|
boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
|
||||||
= HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
|
assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists);
|
||||||
assertEquals("#createNamespaceAndTable failed to create namespace.",
|
|
||||||
true, namespaceExists);
|
|
||||||
|
|
||||||
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
|
boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
|
||||||
assertEquals("#createNamespaceAndTable failed to create table.",
|
assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists);
|
||||||
true, tableExists);
|
|
||||||
|
|
||||||
admin.disableTable(HelloHBase.MY_TABLE_NAME);
|
admin.disableTable(HelloHBase.MY_TABLE_NAME);
|
||||||
admin.deleteTable(HelloHBase.MY_TABLE_NAME);
|
admin.deleteTable(HelloHBase.MY_TABLE_NAME);
|
||||||
|
@ -100,8 +94,7 @@ public class TestHelloHBase {
|
||||||
public void testPutRowToTable() throws IOException {
|
public void testPutRowToTable() throws IOException {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
||||||
Table table
|
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
||||||
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
|
||||||
|
|
||||||
HelloHBase.putRowToTable(table);
|
HelloHBase.putRowToTable(table);
|
||||||
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
||||||
|
@ -115,13 +108,10 @@ public class TestHelloHBase {
|
||||||
public void testDeleteRow() throws IOException {
|
public void testDeleteRow() throws IOException {
|
||||||
Admin admin = TEST_UTIL.getAdmin();
|
Admin admin = TEST_UTIL.getAdmin();
|
||||||
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build());
|
||||||
Table table
|
Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
||||||
= TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME);
|
|
||||||
|
|
||||||
table.put(new Put(HelloHBase.MY_ROW_ID).
|
table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
|
||||||
addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME,
|
HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz")));
|
||||||
HelloHBase.MY_FIRST_COLUMN_QUALIFIER,
|
|
||||||
Bytes.toBytes("xyz")));
|
|
||||||
HelloHBase.deleteRow(table);
|
HelloHBase.deleteRow(table);
|
||||||
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
Result row = table.get(new Get(HelloHBase.MY_ROW_ID));
|
||||||
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());
|
assertEquals("#deleteRow failed to delete row.", true, row.isEmpty());
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
<?xml version="1.0"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
|
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|
||||||
<!--
|
<!--
|
||||||
/**
|
/**
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
@ -22,8 +21,8 @@
|
||||||
-->
|
-->
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<artifactId>hbase-build-configuration</artifactId>
|
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-build-configuration</artifactId>
|
||||||
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
||||||
<relativePath>../hbase-build-configuration</relativePath>
|
<relativePath>../hbase-build-configuration</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
@ -68,10 +67,10 @@
|
||||||
<artifactId>spotbugs-maven-plugin</artifactId>
|
<artifactId>spotbugs-maven-plugin</artifactId>
|
||||||
<executions>
|
<executions>
|
||||||
<execution>
|
<execution>
|
||||||
<inherited>false</inherited>
|
|
||||||
<goals>
|
<goals>
|
||||||
<goal>spotbugs</goal>
|
<goal>spotbugs</goal>
|
||||||
</goals>
|
</goals>
|
||||||
|
<inherited>false</inherited>
|
||||||
<configuration>
|
<configuration>
|
||||||
<excludeFilterFile>${project.basedir}/../dev-support/spotbugs-exclude.xml</excludeFilterFile>
|
<excludeFilterFile>${project.basedir}/../dev-support/spotbugs-exclude.xml</excludeFilterFile>
|
||||||
</configuration>
|
</configuration>
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
<?xml version="1.0"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
<!--
|
<!--
|
||||||
/**
|
/**
|
||||||
|
@ -21,160 +21,18 @@
|
||||||
-->
|
-->
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<artifactId>hbase-build-configuration</artifactId>
|
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-build-configuration</artifactId>
|
||||||
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
<version>3.0.0-alpha-3-SNAPSHOT</version>
|
||||||
<relativePath>../hbase-build-configuration</relativePath>
|
<relativePath>../hbase-build-configuration</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
<artifactId>hbase-assembly</artifactId>
|
<artifactId>hbase-assembly</artifactId>
|
||||||
<name>Apache HBase - Assembly</name>
|
|
||||||
<description>
|
|
||||||
Module that does project assembly and that is all that it does.
|
|
||||||
</description>
|
|
||||||
<packaging>pom</packaging>
|
<packaging>pom</packaging>
|
||||||
|
<name>Apache HBase - Assembly</name>
|
||||||
|
<description>Module that does project assembly and that is all that it does.</description>
|
||||||
<properties>
|
<properties>
|
||||||
<license.bundles.dependencies>true</license.bundles.dependencies>
|
<license.bundles.dependencies>true</license.bundles.dependencies>
|
||||||
</properties>
|
</properties>
|
||||||
<build>
|
|
||||||
<plugins>
|
|
||||||
<!-- licensing info from our dependencies -->
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-remote-resources-plugin</artifactId>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>aggregate-licenses</id>
|
|
||||||
<goals>
|
|
||||||
<goal>process</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<properties>
|
|
||||||
<copyright-end-year>${build.year}</copyright-end-year>
|
|
||||||
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
|
|
||||||
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
|
|
||||||
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
|
|
||||||
<bundled-vega>${license.bundles.vega}</bundled-vega>
|
|
||||||
<bundled-logo>${license.bundles.logo}</bundled-logo>
|
|
||||||
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
|
|
||||||
</properties>
|
|
||||||
<resourceBundles>
|
|
||||||
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
|
|
||||||
</resourceBundles>
|
|
||||||
<supplementalModelArtifacts>
|
|
||||||
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
|
|
||||||
</supplementalModelArtifacts>
|
|
||||||
<supplementalModels>
|
|
||||||
<supplementalModel>supplemental-models.xml</supplementalModel>
|
|
||||||
</supplementalModels>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<artifactId>maven-assembly-plugin</artifactId>
|
|
||||||
<configuration>
|
|
||||||
<!--Else will use hbase-assembly as final name.-->
|
|
||||||
<finalName>hbase-${project.version}</finalName>
|
|
||||||
<skipAssembly>false</skipAssembly>
|
|
||||||
<appendAssemblyId>true</appendAssemblyId>
|
|
||||||
<tarLongFileMode>posix</tarLongFileMode>
|
|
||||||
<descriptors>
|
|
||||||
<descriptor>${assembly.file}</descriptor>
|
|
||||||
<descriptor>src/main/assembly/client.xml</descriptor>
|
|
||||||
</descriptors>
|
|
||||||
</configuration>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<artifactId>maven-dependency-plugin</artifactId>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
|
|
||||||
<id>create-hbase-generated-classpath</id>
|
|
||||||
<phase>test</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>build-classpath</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
|
|
||||||
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
|
|
||||||
<execution>
|
|
||||||
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
|
|
||||||
<id>create-hbase-generated-classpath-jline</id>
|
|
||||||
<phase>test</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>build-classpath</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
|
|
||||||
<includeArtifactIds>jline</includeArtifactIds>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
|
|
||||||
<execution>
|
|
||||||
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
|
|
||||||
<id>create-hbase-generated-classpath-jruby</id>
|
|
||||||
<phase>test</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>build-classpath</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
|
|
||||||
<includeArtifactIds>jruby-complete</includeArtifactIds>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
|
|
||||||
If MASSEMBLY-382 is fixed we could do this in the assembly
|
|
||||||
Currently relies on env, bash, find, and cat.
|
|
||||||
-->
|
|
||||||
<execution>
|
|
||||||
<!-- put all of the NOTICE files out of our dependencies -->
|
|
||||||
<id>unpack-dependency-notices</id>
|
|
||||||
<phase>prepare-package</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>unpack-dependencies</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<excludeTypes>pom</excludeTypes>
|
|
||||||
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
|
|
||||||
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.codehaus.mojo</groupId>
|
|
||||||
<artifactId>exec-maven-plugin</artifactId>
|
|
||||||
<version>${exec.maven.version}</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>concat-NOTICE-files</id>
|
|
||||||
<phase>package</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>exec</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<executable>env</executable>
|
|
||||||
<arguments>
|
|
||||||
<argument>bash</argument>
|
|
||||||
<argument>-c</argument>
|
|
||||||
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
|
|
||||||
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`
|
|
||||||
</argument>
|
|
||||||
</arguments>
|
|
||||||
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
|
|
||||||
<workingDirectory>${project.build.directory}</workingDirectory>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<!-- /end building aggregation of NOTICE files -->
|
|
||||||
</plugins>
|
|
||||||
</build>
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
<!-- client artifacts for downstream use -->
|
<!-- client artifacts for downstream use -->
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@ -189,7 +47,7 @@
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-shaded-mapreduce</artifactId>
|
<artifactId>hbase-shaded-mapreduce</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- Intra-project dependencies -->
|
<!-- Intra-project dependencies -->
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-it</artifactId>
|
<artifactId>hbase-it</artifactId>
|
||||||
|
@ -254,25 +112,25 @@
|
||||||
<artifactId>hbase-external-blockcache</artifactId>
|
<artifactId>hbase-external-blockcache</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-testing-util</artifactId>
|
<artifactId>hbase-testing-util</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-metrics-api</artifactId>
|
<artifactId>hbase-metrics-api</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-metrics</artifactId>
|
<artifactId>hbase-metrics</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-protocol-shaded</artifactId>
|
<artifactId>hbase-protocol-shaded</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hbase</groupId>
|
<groupId>org.apache.hbase</groupId>
|
||||||
<artifactId>hbase-resource-bundle</artifactId>
|
<artifactId>hbase-resource-bundle</artifactId>
|
||||||
<optional>true</optional>
|
<optional>true</optional>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.httpcomponents</groupId>
|
<groupId>org.apache.httpcomponents</groupId>
|
||||||
|
@ -390,4 +248,143 @@
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<!-- licensing info from our dependencies -->
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-remote-resources-plugin</artifactId>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>aggregate-licenses</id>
|
||||||
|
<goals>
|
||||||
|
<goal>process</goal>
|
||||||
|
</goals>
|
||||||
|
<configuration>
|
||||||
|
<properties>
|
||||||
|
<copyright-end-year>${build.year}</copyright-end-year>
|
||||||
|
<debug-print-included-work-info>${license.debug.print.included}</debug-print-included-work-info>
|
||||||
|
<bundled-dependencies>${license.bundles.dependencies}</bundled-dependencies>
|
||||||
|
<bundled-jquery>${license.bundles.jquery}</bundled-jquery>
|
||||||
|
<bundled-vega>${license.bundles.vega}</bundled-vega>
|
||||||
|
<bundled-logo>${license.bundles.logo}</bundled-logo>
|
||||||
|
<bundled-bootstrap>${license.bundles.bootstrap}</bundled-bootstrap>
|
||||||
|
</properties>
|
||||||
|
<resourceBundles>
|
||||||
|
<resourceBundle>${project.groupId}:hbase-resource-bundle:${project.version}</resourceBundle>
|
||||||
|
</resourceBundles>
|
||||||
|
<supplementalModelArtifacts>
|
||||||
|
<supplementalModelArtifact>${project.groupId}:hbase-resource-bundle:${project.version}</supplementalModelArtifact>
|
||||||
|
</supplementalModelArtifacts>
|
||||||
|
<supplementalModels>
|
||||||
|
<supplementalModel>supplemental-models.xml</supplementalModel>
|
||||||
|
</supplementalModels>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<artifactId>maven-assembly-plugin</artifactId>
|
||||||
|
<configuration>
|
||||||
|
<!--Else will use hbase-assembly as final name.-->
|
||||||
|
<finalName>hbase-${project.version}</finalName>
|
||||||
|
<skipAssembly>false</skipAssembly>
|
||||||
|
<appendAssemblyId>true</appendAssemblyId>
|
||||||
|
<tarLongFileMode>posix</tarLongFileMode>
|
||||||
|
<descriptors>
|
||||||
|
<descriptor>${assembly.file}</descriptor>
|
||||||
|
<descriptor>src/main/assembly/client.xml</descriptor>
|
||||||
|
</descriptors>
|
||||||
|
</configuration>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<artifactId>maven-dependency-plugin</artifactId>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<!-- generates the file that will be used by the bin/hbase script in the dev env -->
|
||||||
|
<id>create-hbase-generated-classpath</id>
|
||||||
|
<goals>
|
||||||
|
<goal>build-classpath</goal>
|
||||||
|
</goals>
|
||||||
|
<phase>test</phase>
|
||||||
|
<configuration>
|
||||||
|
<outputFile>${project.parent.basedir}/target/cached_classpath.txt</outputFile>
|
||||||
|
<excludeArtifactIds>jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce</excludeArtifactIds>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
|
||||||
|
<execution>
|
||||||
|
<!-- generates the file that will be used by the bin/hbase zkcli script in the dev env -->
|
||||||
|
<id>create-hbase-generated-classpath-jline</id>
|
||||||
|
<goals>
|
||||||
|
<goal>build-classpath</goal>
|
||||||
|
</goals>
|
||||||
|
<phase>test</phase>
|
||||||
|
<configuration>
|
||||||
|
<outputFile>${project.parent.basedir}/target/cached_classpath_jline.txt</outputFile>
|
||||||
|
<includeArtifactIds>jline</includeArtifactIds>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
|
||||||
|
<execution>
|
||||||
|
<!-- generates the file that will be used by the bin/hbase shell script in the dev env -->
|
||||||
|
<id>create-hbase-generated-classpath-jruby</id>
|
||||||
|
<goals>
|
||||||
|
<goal>build-classpath</goal>
|
||||||
|
</goals>
|
||||||
|
<phase>test</phase>
|
||||||
|
<configuration>
|
||||||
|
<outputFile>${project.parent.basedir}/target/cached_classpath_jruby.txt</outputFile>
|
||||||
|
<includeArtifactIds>jruby-complete</includeArtifactIds>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Build an aggregation of our templated NOTICE file and the NOTICE files in our dependencies.
|
||||||
|
If MASSEMBLY-382 is fixed we could do this in the assembly
|
||||||
|
Currently relies on env, bash, find, and cat.
|
||||||
|
-->
|
||||||
|
<execution>
|
||||||
|
<!-- put all of the NOTICE files out of our dependencies -->
|
||||||
|
<id>unpack-dependency-notices</id>
|
||||||
|
<goals>
|
||||||
|
<goal>unpack-dependencies</goal>
|
||||||
|
</goals>
|
||||||
|
<phase>prepare-package</phase>
|
||||||
|
<configuration>
|
||||||
|
<excludeTypes>pom</excludeTypes>
|
||||||
|
<useSubDirectoryPerArtifact>true</useSubDirectoryPerArtifact>
|
||||||
|
<includes>**\/NOTICE,**\/NOTICE.txt</includes>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.codehaus.mojo</groupId>
|
||||||
|
<artifactId>exec-maven-plugin</artifactId>
|
||||||
|
<version>${exec.maven.version}</version>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>concat-NOTICE-files</id>
|
||||||
|
<goals>
|
||||||
|
<goal>exec</goal>
|
||||||
|
</goals>
|
||||||
|
<phase>package</phase>
|
||||||
|
<configuration>
|
||||||
|
<executable>env</executable>
|
||||||
|
<arguments>
|
||||||
|
<argument>bash</argument>
|
||||||
|
<argument>-c</argument>
|
||||||
|
<argument>cat maven-shared-archive-resources/META-INF/NOTICE \
|
||||||
|
`find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`</argument>
|
||||||
|
</arguments>
|
||||||
|
<outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>
|
||||||
|
<workingDirectory>${project.build.directory}</workingDirectory>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<!-- /end building aggregation of NOTICE files -->
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
</project>
|
</project>
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
<?xml version="1.0"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance"
|
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|
||||||
<!--
|
<!--
|
||||||
/**
|
/**
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
@ -22,8 +21,8 @@
|
||||||
-->
|
-->
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<artifactId>hbase-build-configuration</artifactId>
|
|
||||||
<groupId>org.apache.hbase</groupId>
|
     <groupId>org.apache.hbase</groupId>
     <artifactId>hbase-build-configuration</artifactId>
     <version>3.0.0-alpha-3-SNAPSHOT</version>
     <relativePath>../hbase-build-configuration</relativePath>
   </parent>
@@ -31,33 +30,6 @@
   <artifactId>hbase-asyncfs</artifactId>
   <name>Apache HBase - Asynchronous FileSystem</name>
   <description>HBase Asynchronous FileSystem Implementation for WAL</description>
-  <build>
-    <plugins>
-      <!-- Make a jar and put the sources in the jar -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-source-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <!--Make it so assembly:single does nothing in here-->
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <skipAssembly>true</skipAssembly>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>net.revelc.code</groupId>
-        <artifactId>warbucks-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <failOnViolation>true</failOnViolation>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
 
   <dependencies>
     <dependency>
@@ -169,13 +141,42 @@
       <scope>test</scope>
     </dependency>
   </dependencies>
+  <build>
+    <plugins>
+      <!-- Make a jar and put the sources in the jar -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <!--Make it so assembly:single does nothing in here-->
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <skipAssembly>true</skipAssembly>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>net.revelc.code</groupId>
+        <artifactId>warbucks-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <failOnViolation>true</failOnViolation>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
 
   <profiles>
     <!-- Profiles for building against different hadoop versions -->
     <profile>
       <id>hadoop-3.0</id>
       <activation>
-        <property><name>!hadoop.profile</name></property>
+        <property>
+          <name>!hadoop.profile</name>
+        </property>
       </activation>
       <dependencies>
         <dependency>
@@ -224,8 +225,7 @@
             <artifactId>lifecycle-mapping</artifactId>
             <configuration>
               <lifecycleMappingMetadata>
-                <pluginExecutions>
-                </pluginExecutions>
+                <pluginExecutions/>
               </lifecycleMappingMetadata>
             </configuration>
           </plugin>
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -21,10 +21,9 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.concurrent.CompletableFuture;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Interface for asynchronous filesystem output stream.
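
For reference, a minimal sketch of how a CompletableFuture-based output in the spirit of the AsyncFSOutput interface above gets consumed. The interface and method shapes below are simplified stand-ins for illustration, not the real HBase API.

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;

// Simplified stand-in for an asynchronous WAL output: writes are buffered locally and
// flush() returns a future that completes with the length acked so far by the replicas.
interface AsyncOutSketch extends Closeable {
  void write(byte[] data);

  CompletableFuture<Long> flush(boolean syncBlock);

  static void example(AsyncOutSketch out) throws IOException {
    out.write("wal entry".getBytes());
    // the caller keeps writing; durability is observed through the returned future
    out.flush(false).thenAccept(len -> System.out.println("acked up to offset " + len));
    out.close();
  }
}
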
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -47,9 +47,9 @@ public final class AsyncFSOutputHelper {
    * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
    */
   public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite,
     boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup,
     Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
     throws IOException, CommonFSUtils.StreamLacksCapabilityException {
     if (fs instanceof DistributedFileSystem) {
       return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f,
         overwrite, createParent, replication, blockSize, eventLoopGroup, channelClass, monitor);
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -180,7 +180,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
 
   // State for connections to DN
   private enum State {
-    STREAMING, CLOSING, BROKEN, CLOSED
+    STREAMING,
+    CLOSING,
+    BROKEN,
+    CLOSED
   }
 
   private volatile State state;
@@ -196,7 +199,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     if (c.unfinishedReplicas.remove(channel.id())) {
       long current = EnvironmentEdgeManager.currentTime();
       streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen,
         current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size());
       c.lastAckTimestamp = current;
       if (c.unfinishedReplicas.isEmpty()) {
         // we need to remove first before complete the future. It is possible that after we
@@ -284,13 +287,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
       Status reply = getStatus(ack);
       if (reply != Status.SUCCESS) {
-        failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " +
-          block + " from datanode " + ctx.channel().remoteAddress()));
+        failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block
+          + " from datanode " + ctx.channel().remoteAddress()));
         return;
       }
       if (PipelineAck.isRestartOOBStatus(reply)) {
-        failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " +
-          block + " from datanode " + ctx.channel().remoteAddress()));
+        failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block "
+          + block + " from datanode " + ctx.channel().remoteAddress()));
         return;
       }
       if (ack.getSeqno() == HEART_BEAT_SEQNO) {
@@ -345,10 +348,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     }
   }
 
-  FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs,
-    DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
-    LocatedBlock locatedBlock, Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap,
-    DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
+  FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client,
+    ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock,
+    Encryptor encryptor, Map<Channel, DatanodeInfo> datanodeInfoMap, DataChecksum summer,
+    ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) {
     this.conf = conf;
     this.dfs = dfs;
     this.client = client;
@@ -403,7 +406,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
   }
 
   private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
     long nextPacketOffsetInBlock, boolean syncBlock) {
     int dataLen = dataBuf.readableBytes();
     int chunkLen = summer.getBytesPerChecksum();
     int trailingPartialChunkLen = dataLen % chunkLen;
@@ -413,13 +416,13 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
     checksumBuf.writerIndex(checksumLen);
     PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
       nextPacketSeqno, false, dataLen, syncBlock);
     int headerLen = header.getSerializedSize();
     ByteBuf headerBuf = alloc.buffer(headerLen);
     header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
     headerBuf.writerIndex(headerLen);
-    Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen,
-      datanodeInfoMap.keySet(), dataLen);
+    Callback c =
+      new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen);
     waitingAckQueue.addLast(c);
     // recheck again after we pushed the callback to queue
     if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
@@ -429,7 +432,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
       return;
     }
     // TODO: we should perhaps measure time taken per DN here;
     // we could collect statistics per DN, and/or exclude bad nodes in createOutput.
     datanodeInfoMap.keySet().forEach(ch -> {
       ch.write(headerBuf.retainedDuplicate());
       ch.write(checksumBuf.retainedDuplicate());
@@ -514,7 +517,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     }
     trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum();
     ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen))
       .ensureWritable(trailingPartialChunkLength);
     if (trailingPartialChunkLength != 0) {
       buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf,
         trailingPartialChunkLength);
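
A standalone sketch, under simplified types and the unshaded io.netty imports, of the fan-out write pattern that flushBuffer() above relies on: the same packet buffers are duplicated onto every datanode channel, and the pending future completes once the last replica has acked. Only retainedDuplicate(), write() and writeAndFlush() are real Netty calls; everything else is illustrative.

import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

final class FanOutSketch {
  // Write one packet (header + checksum + data) to every datanode connection.
  static Set<Object> fanOut(Set<Channel> datanodes, ByteBuf header, ByteBuf checksum,
      ByteBuf data) {
    Set<Object> unacked = ConcurrentHashMap.newKeySet();
    datanodes.forEach(ch -> unacked.add(ch.id()));
    datanodes.forEach(ch -> {
      // retainedDuplicate() shares the bytes but gives each channel independent indices
      ch.write(header.retainedDuplicate());
      ch.write(checksum.retainedDuplicate());
      ch.writeAndFlush(data.retainedDuplicate());
    });
    return unacked;
  }

  // Called from a per-channel ack handler; the future fires when the last replica acks.
  static void onAck(Set<Object> unacked, Channel ch, CompletableFuture<Long> future, long acked) {
    if (unacked.remove(ch.id()) && unacked.isEmpty()) {
      future.complete(acked);
    }
  }
}
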
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -116,7 +116,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
 @InterfaceAudience.Private
 public final class FanOutOneBlockAsyncDFSOutputHelper {
   private static final Logger LOG =
     LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputHelper.class);
 
   private FanOutOneBlockAsyncDFSOutputHelper() {
   }
@@ -145,9 +145,8 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   // helper class for creating files.
   private interface FileCreator {
     default HdfsFileStatus create(ClientProtocol instance, String src, FsPermission masked,
-      String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent,
-      short replication, long blockSize, CryptoProtocolVersion[] supportedVersions)
-      throws Exception {
+      String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, CryptoProtocolVersion[] supportedVersions) throws Exception {
       try {
         return (HdfsFileStatus) createObject(instance, src, masked, clientName, flag, createParent,
           replication, blockSize, supportedVersions);
@@ -161,15 +160,15 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
     }
 
     Object createObject(ClientProtocol instance, String src, FsPermission masked, String clientName,
       EnumSetWritable<CreateFlag> flag, boolean createParent, short replication, long blockSize,
       CryptoProtocolVersion[] supportedVersions) throws Exception;
   }
 
   private static final FileCreator FILE_CREATOR;
 
   private static LeaseManager createLeaseManager() throws NoSuchMethodException {
     Method beginFileLeaseMethod =
       DFSClient.class.getDeclaredMethod("beginFileLease", long.class, DFSOutputStream.class);
     beginFileLeaseMethod.setAccessible(true);
     Method endFileLeaseMethod = DFSClient.class.getDeclaredMethod("endFileLease", long.class);
     endFileLeaseMethod.setAccessible(true);
@@ -197,13 +196,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
 
   private static FileCreator createFileCreator3_3() throws NoSuchMethodException {
     Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class,
       String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
       CryptoProtocolVersion[].class, String.class, String.class);
 
     return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
       supportedVersions) -> {
       return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
         createParent, replication, blockSize, supportedVersions, null, null);
     };
   }
 
@@ -213,7 +212,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
       CryptoProtocolVersion[].class, String.class);
 
     return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
       supportedVersions) -> {
       return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
         createParent, replication, blockSize, supportedVersions, null);
     };
@@ -249,9 +248,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
       LEASE_MANAGER = createLeaseManager();
       FILE_CREATOR = createFileCreator();
     } catch (Exception e) {
-      String msg = "Couldn't properly initialize access to HDFS internals. Please " +
-        "update your WAL Provider to not make use of the 'asyncfs' provider. See " +
-        "HBASE-16110 for more information.";
+      String msg = "Couldn't properly initialize access to HDFS internals. Please "
+        + "update your WAL Provider to not make use of the 'asyncfs' provider. See "
+        + "HBASE-16110 for more information.";
       LOG.error(msg, e);
       throw new Error(msg, e);
     }
@@ -282,7 +281,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   }
 
   private static void processWriteBlockResponse(Channel channel, DatanodeInfo dnInfo,
     Promise<Channel> promise, int timeoutMs) {
     channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
       new ProtobufVarint32FrameDecoder(),
       new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
@@ -290,7 +289,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
 
         @Override
         protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
           throws Exception {
           Status pipelineStatus = resp.getStatus();
           if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
             throw new IOException("datanode " + dnInfo + " is restarting");
@@ -298,11 +297,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
           String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
           if (resp.getStatus() != Status.SUCCESS) {
             if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-              throw new InvalidBlockTokenException("Got access token error" + ", status message " +
-                resp.getMessage() + ", " + logInfo);
+              throw new InvalidBlockTokenException("Got access token error" + ", status message "
+                + resp.getMessage() + ", " + logInfo);
             } else {
-              throw new IOException("Got error" + ", status=" + resp.getStatus().name() +
-                ", status message " + resp.getMessage() + ", " + logInfo);
+              throw new IOException("Got error" + ", status=" + resp.getStatus().name()
+                + ", status message " + resp.getMessage() + ", " + logInfo);
             }
           }
           // success
@@ -329,7 +328,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
         public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
           if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) {
             promise
               .tryFailure(new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
           } else {
             super.userEventTriggered(ctx, evt);
           }
@@ -343,7 +342,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   }
 
   private static void requestWriteBlock(Channel channel, StorageType storageType,
     OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
     OpWriteBlockProto proto =
       writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build();
     int protoLen = proto.getSerializedSize();
@@ -356,9 +355,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   }
 
   private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
     StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
     DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
     throws IOException {
     Promise<Void> saslPromise = channel.eventLoop().newPromise();
     trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
     saslPromise.addListener(new FutureListener<Void>() {
@@ -377,13 +376,13 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   }
 
   private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
     String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
     BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
     Class<? extends Channel> channelClass) {
     StorageType[] storageTypes = locatedBlock.getStorageTypes();
     DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
     boolean connectToDnViaHostname =
       conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
     int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
     ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
     blockCopy.setNumBytes(locatedBlock.getBlockSize());
@@ -392,11 +391,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
       .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
       .setClientName(clientName).build();
     ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
-    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
-      .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
-      .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
-      .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
-      .setRequestedChecksum(checksumProto)
+    OpWriteBlockProto.Builder writeBlockProtoBuilder =
+      OpWriteBlockProto.newBuilder().setHeader(header)
+        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
+        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
+        .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
         .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
     List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
     for (int i = 0; i < datanodeInfos.length; i++) {
@@ -406,26 +405,26 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
       futureList.add(promise);
       String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
       new Bootstrap().group(eventLoopGroup).channel(channelClass)
         .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {
 
           @Override
           protected void initChannel(Channel ch) throws Exception {
             // we need to get the remote address of the channel so we can only move on after
             // channel connected. Leave an empty implementation here because netty does not allow
             // a null handler.
           }
         }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {
 
           @Override
           public void operationComplete(ChannelFuture future) throws Exception {
             if (future.isSuccess()) {
               initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                 timeoutMs, client, locatedBlock.getBlockToken(), promise);
             } else {
               promise.tryFailure(future.cause());
             }
           }
         });
     }
     return futureList;
   }
@@ -453,21 +452,21 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   }
 
   private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
     boolean overwrite, boolean createParent, short replication, long blockSize,
-    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
-    StreamSlowMonitor monitor) throws IOException {
+    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor)
+    throws IOException {
     Configuration conf = dfs.getConf();
     DFSClient client = dfs.getClient();
     String clientName = client.getClientName();
     ClientProtocol namenode = client.getNamenode();
-    int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES,
-      DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
+    int createMaxRetries =
+      conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
     ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
     Set<DatanodeInfo> toExcludeNodes =
       new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
     for (int retry = 0;; retry++) {
       LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src,
         toExcludeNodes, retry);
       HdfsFileStatus stat;
       try {
         stat = FILE_CREATOR.create(namenode, src,
@@ -556,14 +555,14 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
    * inside an {@link EventLoop}.
    */
   public static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, Path f,
     boolean overwrite, boolean createParent, short replication, long blockSize,
     EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
     final StreamSlowMonitor monitor) throws IOException {
     return new FileSystemLinkResolver<FanOutOneBlockAsyncDFSOutput>() {
 
       @Override
       public FanOutOneBlockAsyncDFSOutput doCall(Path p)
         throws IOException, UnresolvedLinkException {
         return createOutput(dfs, p.toUri().getPath(), overwrite, createParent, replication,
           blockSize, eventLoopGroup, channelClass, monitor);
       }
@@ -583,7 +582,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   }
 
   static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName,
     ExtendedBlock block, long fileId) {
     for (int retry = 0;; retry++) {
       try {
         if (namenode.complete(src, clientName, block, fileId)) {
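
The static initializer above resolves HDFS-internal methods reflectively and throws an Error with the HBASE-16110 message when it cannot. A self-contained sketch of that probe-and-fall-back idiom using only JDK classes; String.strip/String.trim stand in here for the ClientProtocol.create overloads the helper actually probes.

import java.lang.reflect.Method;

final class ReflectionProbe {
  /** Functional shape of the helper resolved once, mirroring the FileCreator interface. */
  interface Trimmer {
    String trim(String s) throws Exception;
  }

  /** Probe for a newer method first, fall back to an older one, fail fast otherwise. */
  static Trimmer pickTrimmer() {
    try {
      Method strip = String.class.getMethod("strip"); // only present on JDK 11+
      return s -> (String) strip.invoke(s);
    } catch (NoSuchMethodException e) {
      try {
        Method trim = String.class.getMethod("trim"); // always present
        return s -> (String) trim.invoke(s);
      } catch (NoSuchMethodException unreachable) {
        // same fail-fast shape as the helper's static initializer
        throw new Error("Couldn't resolve a trim method", unreachable);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println(pickTrimmer().trim("  hello  "));
  }
}
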
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -104,7 +104,7 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
 @InterfaceAudience.Private
 public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   private static final Logger LOG =
     LoggerFactory.getLogger(FanOutOneBlockAsyncDFSOutputSaslHelper.class);
 
   private FanOutOneBlockAsyncDFSOutputSaslHelper() {
   }
@@ -129,21 +129,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   private interface TransparentCryptoHelper {
 
     Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client)
       throws IOException;
   }
 
   private static final TransparentCryptoHelper TRANSPARENT_CRYPTO_HELPER;
 
   private static SaslAdaptor createSaslAdaptor()
     throws NoSuchFieldException, NoSuchMethodException {
     Field saslPropsResolverField =
       SaslDataTransferClient.class.getDeclaredField("saslPropsResolver");
     saslPropsResolverField.setAccessible(true);
     Field trustedChannelResolverField =
       SaslDataTransferClient.class.getDeclaredField("trustedChannelResolver");
     trustedChannelResolverField.setAccessible(true);
     Field fallbackToSimpleAuthField =
       SaslDataTransferClient.class.getDeclaredField("fallbackToSimpleAuth");
     fallbackToSimpleAuthField.setAccessible(true);
     return new SaslAdaptor() {
 
@@ -177,7 +177,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   }
 
   private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396()
     throws NoSuchMethodException {
     Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class
       .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
     decryptEncryptedDataEncryptionKeyMethod.setAccessible(true);
@@ -185,7 +185,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
 
       @Override
       public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
         DFSClient client) throws IOException {
         try {
           KeyVersion decryptedKey =
             (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo);
@@ -206,7 +206,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   }
 
   private static TransparentCryptoHelper createTransparentCryptoHelperWithHDFS12396()
     throws ClassNotFoundException, NoSuchMethodException {
     Class<?> hdfsKMSUtilCls = Class.forName("org.apache.hadoop.hdfs.HdfsKMSUtil");
     Method decryptEncryptedDataEncryptionKeyMethod = hdfsKMSUtilCls.getDeclaredMethod(
       "decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class, KeyProvider.class);
@@ -215,7 +215,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
 
       @Override
       public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo,
         DFSClient client) throws IOException {
         try {
           KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod
             .invoke(null, feInfo, client.getKeyProvider());
@@ -236,12 +236,12 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   }
 
   private static TransparentCryptoHelper createTransparentCryptoHelper()
     throws NoSuchMethodException, ClassNotFoundException {
     try {
       return createTransparentCryptoHelperWithoutHDFS12396();
     } catch (NoSuchMethodException e) {
-      LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," +
-        " should be hadoop version with HDFS-12396", e);
+      LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient,"
+        + " should be hadoop version with HDFS-12396", e);
     }
     return createTransparentCryptoHelperWithHDFS12396();
   }
@@ -252,8 +252,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
       TRANSPARENT_CRYPTO_HELPER = createTransparentCryptoHelper();
     } catch (Exception e) {
       String msg = "Couldn't properly initialize access to HDFS internals. Please "
         + "update your WAL Provider to not make use of the 'asyncfs' provider. See "
         + "HBASE-16110 for more information.";
       LOG.error(msg, e);
       throw new Error(msg, e);
     }
@@ -324,8 +324,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
     private int step = 0;
 
     public SaslNegotiateHandler(Configuration conf, String username, char[] password,
-      Map<String, String> saslProps, int timeoutMs, Promise<Void> promise,
-      DFSClient dfsClient) throws SaslException {
+      Map<String, String> saslProps, int timeoutMs, Promise<Void> promise, DFSClient dfsClient)
+      throws SaslException {
       this.conf = conf;
       this.saslProps = saslProps;
       this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL,
@@ -355,8 +355,8 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   }
 
   /**
-   * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty.
-   * After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
+   * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After
+   * Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*.
    * Use Reflection to check which ones to use.
    */
   private static class BuilderPayloadSetter {
@@ -366,13 +366,11 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
     /**
      * Create a ByteString from byte array without copying (wrap), and then set it as the payload
      * for the builder.
-     *
      * @param builder builder for HDFS DataTransferEncryptorMessage.
-     * @param payload byte array of payload.
-     * @throws IOException
+     * @param payload byte array of payload. n
     */
-    static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload)
-      throws IOException {
+    static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
+      byte[] payload) throws IOException {
       Object byteStringObject;
       try {
         // byteStringObject = new LiteralByteString(payload);
@@ -396,18 +394,18 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
       try {
         // See if it can load the relocated ByteString, which comes from hadoop-thirdparty.
        byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
-        LOG.debug("Found relocated ByteString class from hadoop-thirdparty." +
-          " Assuming this is Hadoop 3.3.0+.");
+        LOG.debug("Found relocated ByteString class from hadoop-thirdparty."
+          + " Assuming this is Hadoop 3.3.0+.");
       } catch (ClassNotFoundException e) {
-        LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." +
-          " Assuming this is below Hadoop 3.3.0", e);
+        LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty."
+          + " Assuming this is below Hadoop 3.3.0", e);
       }
 
       // LiteralByteString is a package private class in protobuf. Make it accessible.
       Class<?> literalByteStringClass;
       try {
-        literalByteStringClass = Class.forName(
-          "org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
+        literalByteStringClass =
+          Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString");
         LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found.");
       } catch (ClassNotFoundException e) {
         try {
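
A self-contained sketch of the Class.forName probe that the BuilderPayloadSetter javadoc above describes. Only the org.apache.hadoop.thirdparty class name is taken from the diff; the unshaded com.google.protobuf fallback is an assumption for illustration.

final class ByteStringProbe {
  static Class<?> resolveByteString() throws ClassNotFoundException {
    try {
      // Hadoop 3.3.0+ relocates protobuf under org.apache.hadoop.thirdparty
      return Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString");
    } catch (ClassNotFoundException e) {
      // assumed fallback for this sketch: the unshaded protobuf class used by older Hadoop
      return Class.forName("com.google.protobuf.ByteString");
    }
  }

  public static void main(String[] args) {
    try {
      System.out.println("Using " + resolveByteString().getName());
    } catch (ClassNotFoundException e) {
      System.out.println("No protobuf ByteString on the classpath");
    }
  }
}
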
@@ -435,9 +433,9 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
     }
 
     private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload,
       List<CipherOption> options) throws IOException {
       DataTransferEncryptorMessageProto.Builder builder =
         DataTransferEncryptorMessageProto.newBuilder();
       builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
       if (payload != null) {
         BuilderPayloadSetter.wrapAndSetPayload(builder, payload);
@@ -486,7 +484,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
 
     private boolean requestedQopContainsPrivacy() {
       Set<String> requestedQop =
         ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
       return requestedQop.contains("auth-conf");
     }
 
@@ -495,15 +493,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
         throw new IOException("Failed to complete SASL handshake");
       }
       Set<String> requestedQop =
         ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
       String negotiatedQop = getNegotiatedQop();
       LOG.debug(
         "Verifying QOP, requested QOP = " + requestedQop + ", negotiated QOP = " + negotiatedQop);
       if (!requestedQop.contains(negotiatedQop)) {
         throw new IOException(String.format("SASL handshake completed, but "
           + "channel does not have acceptable quality of protection, "
-          + "requested = %s, negotiated = %s",
-          requestedQop, negotiatedQop));
+          + "requested = %s, negotiated = %s", requestedQop, negotiatedQop));
       }
     }
 
@@ -522,13 +519,13 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
         outKey = saslClient.unwrap(outKey, 0, outKey.length);
       }
       return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey,
         option.getOutIv());
     }
 
     private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto,
       boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException {
       List<CipherOption> cipherOptions =
         PBHelperClient.convertCipherOptionProtos(proto.getCipherOptionList());
       if (cipherOptions == null || cipherOptions.isEmpty()) {
         return null;
       }
@@ -558,7 +555,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
         assert response == null;
         checkSaslComplete();
         CipherOption cipherOption =
           getCipherOption(proto, isNegotiatedQopPrivacy(), saslClient);
         ChannelPipeline p = ctx.pipeline();
         while (p.first() != null) {
           p.removeFirst();
@@ -639,7 +636,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
 
     @Override
     public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
       throws Exception {
       if (msg instanceof ByteBuf) {
         ByteBuf buf = (ByteBuf) msg;
         cBuf.addComponent(buf);
@@ -676,7 +673,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
     private final Decryptor decryptor;
 
     public DecryptHandler(CryptoCodec codec, byte[] key, byte[] iv)
       throws GeneralSecurityException, IOException {
       this.decryptor = codec.createDecryptor();
       this.decryptor.init(key, Arrays.copyOf(iv, iv.length));
     }
@@ -709,14 +706,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
     private final Encryptor encryptor;
 
     public EncryptHandler(CryptoCodec codec, byte[] key, byte[] iv)
       throws GeneralSecurityException, IOException {
       this.encryptor = codec.createEncryptor();
       this.encryptor.init(key, Arrays.copyOf(iv, iv.length));
     }
 
     @Override
     protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect)
       throws Exception {
       if (preferDirect) {
         return ctx.alloc().directBuffer(msg.readableBytes());
       } else {
@@ -747,7 +744,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
 
   private static String getUserNameFromEncryptionKey(DataEncryptionKey encryptionKey) {
     return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER
       + Base64.getEncoder().encodeToString(encryptionKey.nonce);
   }
 
   private static char[] encryptionKeyToPassword(byte[] encryptionKey) {
@@ -771,26 +768,26 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
   }
 
   private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs,
     String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise,
     DFSClient dfsClient) {
     try {
       channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
         new ProtobufVarint32FrameDecoder(),
         new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()),
         new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise,
           dfsClient));
     } catch (SaslException e) {
       saslPromise.tryFailure(e);
     }
   }
 
   static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
     int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
     Promise<Void> saslPromise) throws IOException {
     SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
     SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
     TrustedChannelResolver trustedChannelResolver =
       SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
     AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
     InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
     if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
@@ -805,24 +802,23 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
       }
       doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
         encryptionKeyToPassword(encryptionKey.encryptionKey),
-        createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise,
-        client);
+        createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client);
     } else if (!UserGroupInformation.isSecurityEnabled()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
           + ", datanodeId = " + dnInfo);
       }
       saslPromise.trySuccess(null);
     } else if (dnInfo.getXferPort() < 1024) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("SASL client skipping handshake in secured configuration with "
           + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
       }
       saslPromise.trySuccess(null);
     } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("SASL client skipping handshake in secured configuration with "
           + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
       }
       saslPromise.trySuccess(null);
     } else if (saslPropsResolver != null) {
@@ -832,21 +828,21 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
       }
       doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
         buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise,
         client);
     } else {
       // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
       // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
|
||||||
// edge case.
|
// edge case.
|
||||||
if (LOG.isDebugEnabled()) {
|
if (LOG.isDebugEnabled()) {
|
||||||
LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
|
LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
|
||||||
+ "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
|
+ "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
|
||||||
}
|
}
|
||||||
saslPromise.trySuccess(null);
|
saslPromise.trySuccess(null);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client)
|
static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat, DFSClient client)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
|
FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
|
||||||
if (feInfo == null) {
|
if (feInfo == null) {
|
||||||
return null;
|
return null;
|
||||||
|
|
|
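
The hunks above are formatting-only, but the SASL username they pass into doSaslNegotiation is assembled from the block's DataEncryptionKey, and that construction is easy to miss in the diff. Below is a standalone, hedged sketch (not part of the patch and not the HBase class): the space delimiter and the sample key id, block pool id and nonce are assumptions for illustration only.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class SaslUserNameSketch {
  // Assumed delimiter; the real constant lives in the helper class shown in the diff.
  private static final String NAME_DELIMITER = " ";

  static String userNameFor(int keyId, String blockPoolId, byte[] nonce) {
    // keyId and blockPoolId identify the key; the nonce is base64-encoded so the
    // resulting SASL username stays plain text, mirroring getUserNameFromEncryptionKey.
    return keyId + NAME_DELIMITER + blockPoolId + NAME_DELIMITER
      + Base64.getEncoder().encodeToString(nonce);
  }

  public static void main(String[] args) {
    byte[] nonce = "example-nonce".getBytes(StandardCharsets.UTF_8);
    System.out.println(userNameFor(42, "BP-1234-10.0.0.1-1700000000000", nonce));
  }
}
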
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -17,33 +17,29 @@
 */
 package org.apache.hadoop.hbase.io.asyncfs;

+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.List;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
 import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil;
 import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
 import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder;
 import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.List;

 /**
- * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder.
- * The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf).
- *
- * Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and
- * so we must use reflection to detect which one (relocated or not) to use.
- *
- * Do not use this to process HBase's shaded protobuf messages. This is meant to process the
- * protobuf messages in HDFS for the asyncfs use case.
- * */
+ * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode
+ * supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates
+ * protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect
+ * which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages.
+ * This is meant to process the protobuf messages in HDFS for the asyncfs use case.
+ */
 @InterfaceAudience.Private
 public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
-private static final Logger LOG =
-LoggerFactory.getLogger(ProtobufDecoder.class);
+private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class);

 private static Class<?> protobufMessageLiteClass = null;
 private static Class<?> protobufMessageLiteBuilderClass = null;
@@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
 private Object parser;
 private Object builder;


 public ProtobufDecoder(Object prototype) {
 try {
-Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod(
-"getDefaultInstanceForType");
-Object prototype1 = getDefaultInstanceForTypeMethod
-.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));
+Method getDefaultInstanceForTypeMethod =
+protobufMessageLiteClass.getMethod("getDefaultInstanceForType");
+Object prototype1 =
+getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype"));

 // parser = prototype.getParserForType()
 parser = getParserForTypeMethod.invoke(prototype1);
-parseFromMethod = parser.getClass().getMethod(
-"parseFrom", byte[].class, int.class, int.class);
+parseFromMethod =
+parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class);

 // builder = prototype.newBuilderForType();
 builder = newBuilderForTypeMethod.invoke(prototype1);
-mergeFromMethod = builder.getClass().getMethod(
-"mergeFrom", byte[].class, int.class, int.class);
+mergeFromMethod =
+builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class);

 // All protobuf message builders inherits from MessageLite.Builder
 buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build");
@@ -88,8 +83,7 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {
 }
 }

-protected void decode(
-ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
+protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
 int length = msg.readableBytes();
 byte[] array;
 int offset;
@@ -122,8 +116,8 @@ public class ProtobufDecoder extends MessageToMessageDecoder<ByteBuf> {

 try {
 protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
-protobufMessageLiteBuilderClass = Class.forName(
-"org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
+protobufMessageLiteBuilderClass =
+Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder");
 LOG.debug("Hadoop 3.3 and above shades protobuf.");
 } catch (ClassNotFoundException e) {
 LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e);
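
As the reworked class comment says, ProtobufDecoder must discover at runtime whether Hadoop ships the relocated (hadoop-thirdparty) or the unshaded protobuf classes. A minimal, self-contained sketch of that probe follows; the two class names come from the diff, while the fallback handling and printed messages are illustrative.

import java.lang.reflect.Method;

public class ProtobufRelocationProbe {
  static Class<?> resolveMessageLite() {
    try {
      // Hadoop 3.3+ relocates protobuf into hadoop-thirdparty.
      return Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
    } catch (ClassNotFoundException e) {
      try {
        // Older Hadoop still exposes the unshaded classes.
        return Class.forName("com.google.protobuf.MessageLite");
      } catch (ClassNotFoundException e2) {
        return null; // no protobuf runtime on the classpath at all
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Class<?> messageLite = resolveMessageLite();
    if (messageLite == null) {
      System.out.println("No protobuf runtime found");
      return;
    }
    // Subsequent calls (parseFrom, newBuilderForType, ...) are looked up reflectively,
    // so one decoder works against whichever relocation is present.
    Method getDefaultInstance = messageLite.getMethod("getDefaultInstanceForType");
    System.out.println("Using " + messageLite.getName() + ", e.g. " + getDefaultInstance.getName());
  }
}
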
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -22,7 +22,6 @@ import java.nio.ByteBuffer;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@@ -50,7 +49,7 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
 public WrapperAsyncFSOutput(Path file, FSDataOutputStream out) {
 this.out = out;
 this.executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true)
 .setNameFormat("AsyncFSOutputFlusher-" + file.toString().replace("%", "%%")).build());
 }

 @Override
@@ -95,8 +94,8 @@ public class WrapperAsyncFSOutput implements AsyncFSOutput {
 }
 long pos = out.getPos();
 /**
- * This flush0 method could only be called by single thread, so here we could
- * safely overwrite without any synchronization.
+ * This flush0 method could only be called by single thread, so here we could safely overwrite
+ * without any synchronization.
 */
 this.syncedLength = pos;
 future.complete(pos);
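
The comment reflow above restates the ordering argument in WrapperAsyncFSOutput: every flush runs on the single "AsyncFSOutputFlusher" thread, so the synced-length bookkeeping is only ever written from one thread and needs no extra locking. A small sketch of that pattern, independent of the HBase classes; all names and the bookkeeping are illustrative.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SingleThreadFlusherSketch {
  private final ExecutorService flusher = Executors.newSingleThreadExecutor();
  private volatile long syncedLength; // read by callers, written only by the flusher thread
  private long produced;

  CompletableFuture<Long> flush() {
    long target = ++produced; // pretend one more unit of data was buffered
    CompletableFuture<Long> future = new CompletableFuture<>();
    flusher.execute(() -> {
      // Runs on exactly one thread, in submission order, so this overwrite is safe.
      syncedLength = target;
      future.complete(syncedLength);
    });
    return future;
  }

  public static void main(String[] args) throws Exception {
    SingleThreadFlusherSketch out = new SingleThreadFlusherSketch();
    out.flush();
    System.out.println("synced=" + out.flush().get());
    out.flusher.shutdown();
  }
}
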
@@ -56,24 +56,23 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
 private final int maxExcludeDNCount;
 private final Configuration conf;
 // This is a map of providerId->StreamSlowMonitor
-private final Map<String, StreamSlowMonitor> streamSlowMonitors =
-new ConcurrentHashMap<>(1);
+private final Map<String, StreamSlowMonitor> streamSlowMonitors = new ConcurrentHashMap<>(1);

 public ExcludeDatanodeManager(Configuration conf) {
 this.conf = conf;
 this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
 DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT);
 this.excludeDNsCache = CacheBuilder.newBuilder()
-.expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY,
-DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS)
-.maximumSize(this.maxExcludeDNCount)
-.build();
+.expireAfterWrite(
+this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
+TimeUnit.HOURS)
+.maximumSize(this.maxExcludeDNCount).build();
 }

 /**
 * Try to add a datanode to the regionserver excluding cache
 * @param datanodeInfo the datanode to be added to the excluded cache
 * @param cause the cause that the datanode is hope to be excluded
 * @return True if the datanode is added to the regionserver excluding cache, false otherwise
 */
 public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) {
@@ -85,15 +84,15 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
 datanodeInfo, cause, excludeDNsCache.size());
 return true;
 }
-LOG.debug("Try add datanode {} to exclude cache by [{}] failed, "
-+ "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet());
+LOG.debug(
+"Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}",
+datanodeInfo, cause, getExcludeDNs().keySet());
 return false;
 }

 public StreamSlowMonitor getStreamSlowMonitor(String name) {
 String key = name == null || name.isEmpty() ? "defaultMonitorName" : name;
-return streamSlowMonitors
-.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
+return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this));
 }

 public Map<DatanodeInfo, Long> getExcludeDNs() {
@@ -105,10 +104,12 @@ public class ExcludeDatanodeManager implements ConfigurationObserver {
 for (StreamSlowMonitor monitor : streamSlowMonitors.values()) {
 monitor.onConfigurationChange(conf);
 }
-this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite(
-this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
-TimeUnit.HOURS).maximumSize(this.conf
-.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
+this.excludeDNsCache = CacheBuilder.newBuilder()
+.expireAfterWrite(
+this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
+TimeUnit.HOURS)
+.maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
+DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
 .build();
 }
 }
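
The reflowed builder chain above configures the exclude-datanode cache with a write TTL and a size cap, so an excluded datanode automatically becomes eligible again once the entry expires or is evicted. Below is a hedged sketch of the same CacheBuilder pattern. It assumes plain Guava on the classpath (HBase uses the relocated copy from hbase-thirdparty), uses String keys instead of DatanodeInfo, and the numeric values are placeholders for whatever WAL_EXCLUDE_DATANODE_TTL_KEY and WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY are configured to.

import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class ExcludeCacheSketch {
  public static void main(String[] args) {
    long ttlHours = 6;   // placeholder for the configured exclude-datanode TTL
    int maxExcluded = 3; // placeholder for the configured max excluded datanode count
    Cache<String, Long> excludeDNsCache = CacheBuilder.newBuilder()
      .expireAfterWrite(ttlHours, TimeUnit.HOURS) // entries age out after the TTL
      .maximumSize(maxExcluded)                   // never exclude more than the cap
      .build();
    excludeDNsCache.put("dn-10.0.0.1:9866", System.currentTimeMillis());
    System.out.println("excluded now: " + excludeDNsCache.asMap().keySet());
  }
}
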
@@ -38,18 +38,16 @@ import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
 import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;

 /**
- * Class for monitor the wal file flush performance.
- * Each active wal file has a StreamSlowMonitor.
+ * Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor.
 */
 @InterfaceAudience.Private
 public class StreamSlowMonitor implements ConfigurationObserver {
 private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class);

 /**
- * Configure for the min count for a datanode detected slow.
- * If a datanode is detected slow times up to this count, then it will be added to the exclude
- * datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)}
- * of this regionsever.
+ * Configure for the min count for a datanode detected slow. If a datanode is detected slow times
+ * up to this count, then it will be added to the exclude datanode cache by
+ * {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever.
 */
 private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY =
 "hbase.regionserver.async.wal.min.slow.detect.count";
@@ -63,9 +61,9 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms

 /**
- * Configure for the speed check of packet min length.
- * For packets whose data length smaller than this value, check slow by processing time.
- * While for packets whose data length larger than this value, check slow by flushing speed.
+ * Configure for the speed check of packet min length. For packets whose data length smaller than
+ * this value, check slow by processing time. While for packets whose data length larger than this
+ * value, check slow by flushing speed.
 */
 private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY =
 "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min";
@@ -73,8 +71,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024;

 /**
- * Configure for the slow packet process time, a duration from send to ACK.
- * The processing time check is for packets that data length smaller than
+ * Configure for the slow packet process time, a duration from send to ACK. The processing time
+ * check is for packets that data length smaller than
 * {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY}
 */
 public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY =
@@ -105,15 +103,16 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 private long minLengthForSpeedCheck;

 public StreamSlowMonitor(Configuration conf, String name,
 ExcludeDatanodeManager excludeDatanodeManager) {
 setConf(conf);
 this.name = name;
 this.excludeDatanodeManager = excludeDatanodeManager;
 this.datanodeSlowDataQueue = CacheBuilder.newBuilder()
 .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY,
 DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT))
-.expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY,
-DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS)
+.expireAfterWrite(
+conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL),
+TimeUnit.HOURS)
 .build(new CacheLoader<DatanodeInfo, Deque<PacketAckData>>() {
 @Override
 public Deque<PacketAckData> load(DatanodeInfo key) throws Exception {
@@ -129,30 +128,33 @@ public class StreamSlowMonitor implements ConfigurationObserver {

 /**
 * Check if the packet process time shows that the relevant datanode is a slow node.
 * @param datanodeInfo the datanode that processed the packet
 * @param packetDataLen the data length of the packet (in bytes)
 * @param processTimeMs the process time (in ms) of the packet on the datanode,
 * @param lastAckTimestamp the last acked timestamp of the packet on another datanode
 * @param unfinished if the packet is unfinished flushed to the datanode replicas
 */
 public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataLen,
 long processTimeMs, long lastAckTimestamp, int unfinished) {
 long current = EnvironmentEdgeManager.currentTime();
 // Here are two conditions used to determine whether a datanode is slow,
 // 1. For small packet, we just have a simple time limit, without considering
 // the size of the packet.
 // 2. For large packet, we will calculate the speed, and check if the speed is too slow.
-boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || (
-packetDataLen > minLengthForSpeedCheck
+boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
+|| (packetDataLen > minLengthForSpeedCheck
 && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
 if (slow) {
 // Check if large diff ack timestamp between replicas,
 // should try to avoid misjudgments that caused by GC STW.
-if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || (
-lastAckTimestamp <= 0 && unfinished == 0)) {
-LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
-+ "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs,
-unfinished, lastAckTimestamp, this.name);
+if (
+(lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2)
+|| (lastAckTimestamp <= 0 && unfinished == 0)
+) {
+LOG.info(
+"Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, "
++ "lastAckTimestamp={}, monitor name: {}",
+datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name);
 if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) {
 excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack");
 }
@@ -168,8 +170,10 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long processTime) {
 Deque<PacketAckData> slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo);
 long current = EnvironmentEdgeManager.currentTime();
-while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
-|| slowDNQueue.size() >= minSlowDetectCount)) {
+while (
+!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl
+|| slowDNQueue.size() >= minSlowDetectCount)
+) {
 slowDNQueue.removeFirst();
 }
 slowDNQueue.addLast(new PacketAckData(dataLength, processTime));
@@ -177,13 +181,13 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 }

 private void setConf(Configuration conf) {
-this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY,
-DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
+this.minSlowDetectCount =
+conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT);
 this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL);
 this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY,
 DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME);
-this.minLengthForSpeedCheck = conf.getLong(
-DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
+this.minLengthForSpeedCheck =
+conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY,
 DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH);
 this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY,
 DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED);
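
The reformatted condition in checkProcessTimeAndSpeed reads more easily pulled out on its own: small packets are flagged on ack time alone, large packets on effective flush speed (bytes per millisecond, roughly KB/s). Below is a standalone restatement with a worked example. Only the 64 KiB length cutoff comes from the diff; the ack-time and speed thresholds are placeholders for the configured slowPacketAckMs and minPacketFlushSpeedKBs.

public class SlowPacketCheckSketch {
  static boolean isSlow(long packetDataLen, long processTimeMs, long minLengthForSpeedCheck,
      long slowPacketAckMs, double minPacketFlushSpeedKBs) {
    // Same two-branch rule as StreamSlowMonitor: time limit for small packets,
    // speed floor for large ones.
    return (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
      || (packetDataLen > minLengthForSpeedCheck
        && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
  }

  public static void main(String[] args) {
    long minLen = 64 * 1024; // DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH
    long slowAckMs = 6000;   // placeholder for the configured slow packet process time
    double minSpeed = 20.0;  // placeholder for the configured minimum flush speed (KB/s)
    // 100000 bytes flushed in 5100 ms is about 19.6 KB/s, under the 20 KB/s floor -> slow.
    System.out.println(isSlow(100_000, 5_100, minLen, slowAckMs, minSpeed));
    // 5000 bytes is a small packet; 7000 ms exceeds the 6000 ms ack limit -> slow.
    System.out.println(isSlow(5_000, 7_000, minLen, slowAckMs, minSpeed));
  }
}

These two worked cases mirror the inputs used in TestExcludeDatanodeManager further down: three such slow reports for the same datanode are enough to push it into the exclude cache.
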
@@ -1,5 +1,4 @@
 /*
-*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -21,8 +20,8 @@ package org.apache.hadoop.hbase.util;
 import org.apache.yetus.audience.InterfaceAudience;

 /**
- * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns
- * a boolean to support canceling the operation.
+ * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support
+ * canceling the operation.
 * <p/>
 * Used for doing updating of OPENING znode during log replay on region open.
 */
@@ -30,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface CancelableProgressable {

 /**
- * Report progress. Returns true if operations should continue, false if the
- * operation should be canceled and rolled back.
+ * Report progress. Returns true if operations should continue, false if the operation should be
+ * canceled and rolled back.
 * @return whether to continue (true) or cancel (false) the operation
 */
 boolean progress();
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -120,8 +120,10 @@ public final class RecoverLeaseFSUtils {
 // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check
 // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though.
 long localStartWaiting = EnvironmentEdgeManager.currentTime();
-while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase *
-nbAttempt) {
+while (
+(EnvironmentEdgeManager.currentTime() - localStartWaiting)
+< subsequentPauseBase * nbAttempt
+) {
 Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
 if (findIsFileClosedMeth) {
 try {
@@ -152,10 +154,10 @@ public final class RecoverLeaseFSUtils {
 private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout,
 final int nbAttempt, final Path p, final long startWaiting) {
 if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) {
-LOG.warn("Cannot recoverLease after trying for " +
-conf.getInt("hbase.lease.recovery.timeout", 900000) +
-"ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " +
-getLogMessageDetail(nbAttempt, p, startWaiting));
+LOG.warn("Cannot recoverLease after trying for "
++ conf.getInt("hbase.lease.recovery.timeout", 900000)
++ "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; "
++ getLogMessageDetail(nbAttempt, p, startWaiting));
 return true;
 }
 return false;
@@ -170,8 +172,8 @@ public final class RecoverLeaseFSUtils {
 boolean recovered = false;
 try {
 recovered = dfs.recoverLease(p);
-LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") +
-getLogMessageDetail(nbAttempt, p, startWaiting));
+LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ")
++ getLogMessageDetail(nbAttempt, p, startWaiting));
 } catch (IOException e) {
 if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
 // This exception comes out instead of FNFE, fix it
@@ -189,8 +191,8 @@ public final class RecoverLeaseFSUtils {
 */
 private static String getLogMessageDetail(final int nbAttempt, final Path p,
 final long startWaiting) {
-return "attempt=" + nbAttempt + " on file=" + p + " after " +
-(EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
+return "attempt=" + nbAttempt + " on file=" + p + " after "
++ (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms";
 }

 /**
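
The reformatted while loop above is the pacing between recoverLease attempts: after attempt n the caller sleeps in short fixed pauses until subsequentPauseBase * n has elapsed, so later attempts wait proportionally longer. A hedged, self-contained sketch of that pacing follows; the millisecond values and the fake tryRecover are illustrative stand-ins for the hbase.lease.recovery.* settings and the real DFS call.

public class LeaseRecoveryPacingSketch {
  public static void main(String[] args) throws InterruptedException {
    long subsequentPauseBase = 300; // ms, placeholder
    long pause = 100;               // ms, placeholder for hbase.lease.recovery.pause
    for (int nbAttempt = 1; nbAttempt <= 3; nbAttempt++) {
      boolean recovered = tryRecover(nbAttempt); // stand-in for dfs.recoverLease(path)
      System.out.println("attempt=" + nbAttempt + " recovered=" + recovered);
      if (recovered) {
        return;
      }
      long localStartWaiting = System.currentTimeMillis();
      // Spin until subsequentPauseBase * nbAttempt has elapsed, sleeping a fixed pause each time;
      // the real code also probes isFileClosed inside this loop when it is available.
      while ((System.currentTimeMillis() - localStartWaiting) < subsequentPauseBase * nbAttempt) {
        Thread.sleep(pause);
      }
    }
  }

  private static boolean tryRecover(int nbAttempt) {
    return nbAttempt == 3; // pretend the third attempt succeeds
  }
}
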
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.asyncfs;

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -44,19 +45,15 @@ public class TestExcludeDatanodeManager {
 StreamSlowMonitor streamSlowDNsMonitor =
 excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
 assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
-DatanodeInfo datanodeInfo =
-new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1")
-.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333)
-.setIpcPort(444).setNetworkLocation("location1").build();
-streamSlowDNsMonitor
-.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
-System.currentTimeMillis() - 5100, 0);
-streamSlowDNsMonitor
-.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
-System.currentTimeMillis() - 5100, 0);
-streamSlowDNsMonitor
-.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
-System.currentTimeMillis() - 5100, 0);
+DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
+.setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
+.setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
+streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
+System.currentTimeMillis() - 5100, 0);
+streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
+System.currentTimeMillis() - 5100, 0);
+streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100,
+System.currentTimeMillis() - 5100, 0);
 assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
 assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
 }
@@ -68,19 +65,15 @@ public class TestExcludeDatanodeManager {
 StreamSlowMonitor streamSlowDNsMonitor =
 excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
 assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
-DatanodeInfo datanodeInfo =
-new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1")
-.setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333)
-.setIpcPort(444).setNetworkLocation("location1").build();
-streamSlowDNsMonitor
-.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
-System.currentTimeMillis() - 7000, 0);
-streamSlowDNsMonitor
-.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
-System.currentTimeMillis() - 7000, 0);
-streamSlowDNsMonitor
-.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
-System.currentTimeMillis() - 7000, 0);
+DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0")
+.setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222)
+.setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build();
+streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
+System.currentTimeMillis() - 7000, 0);
+streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
+System.currentTimeMillis() - 7000, 0);
+streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000,
+System.currentTimeMillis() - 7000, 0);
 assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
 assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo));
 }
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -57,6 +57,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.io.netty.channel.Channel;
 import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
 import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
@@ -240,9 +241,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
 StreamSlowMonitor streamSlowDNsMonitor =
 excludeDatanodeManager.getStreamSlowMonitor("testMonitor");
 assertEquals(0, excludeDatanodeManager.getExcludeDNs().size());
-try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS,
-f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop,
-CHANNEL_CLASS, streamSlowDNsMonitor)) {
+try (FanOutOneBlockAsyncDFSOutput output =
+FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3,
+FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) {
 // should exclude the dead dn when retry so here we only have 2 DNs in pipeline
 assertEquals(2, output.getPipeline().length);
 assertEquals(1, excludeDatanodeManager.getExcludeDNs().size());
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -47,6 +47,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
 import org.apache.hbase.thirdparty.io.netty.channel.Channel;
 import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
@@ -70,10 +71,10 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {

 @ClassRule
 public static final HBaseClassTestRule CLASS_RULE =
 HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutputHang.class);

 private static final Logger LOG =
 LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutputHang.class);

 private static DistributedFileSystem FS;

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -31,7 +31,7 @@ public class TestSendBufSizePredictor {

 @ClassRule
 public static final HBaseClassTestRule CLASS_RULE =
 HBaseClassTestRule.forClass(TestSendBufSizePredictor.class);

 @Test
 public void test() {
@@ -110,9 +110,9 @@ public final class HBaseKerberosUtils {

 /**
 * Set up configuration for a secure HDFS+HBase cluster.
 * @param conf configuration object.
 * @param servicePrincipal service principal used by NN, HM and RS.
 * @param spnegoPrincipal SPNEGO principal used by NN web UI.
 */
 public static void setSecuredConfiguration(Configuration conf, String servicePrincipal,
 String spnegoPrincipal) {
@@ -156,7 +156,7 @@ public final class HBaseKerberosUtils {
 /**
 * Set up SSL configuration for HDFS NameNode and DataNode.
 * @param utility a HBaseTestingUtility object.
 * @param clazz the caller test class.
 * @throws Exception if unable to set up SSL configuration
 */
 public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class<?> clazz)
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util;
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -69,8 +68,8 @@ public class TestRecoverLeaseFSUtils {
 Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
 // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
 // invocations will happen pretty fast... the we fall into the longer wait loop).
-assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 *
-HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
+assertTrue((EnvironmentEdgeManager.currentTime() - startTime)
+> (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
 }

 /**
@@ -1,4 +1,4 @@
-<?xml version="1.0"?>
+<?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
 <!--
 /**
@@ -21,34 +21,14 @@
 -->
 <modelVersion>4.0.0</modelVersion>
 <parent>
-<artifactId>hbase-build-configuration</artifactId>
 <groupId>org.apache.hbase</groupId>
+<artifactId>hbase-build-configuration</artifactId>
 <version>3.0.0-alpha-3-SNAPSHOT</version>
 <relativePath>../hbase-build-configuration</relativePath>
 </parent>
 <artifactId>hbase-backup</artifactId>
 <name>Apache HBase - Backup</name>
 <description>Backup for HBase</description>
-<build>
-<plugins>
-<plugin>
-<!--Make it so assembly:single does nothing in here-->
-<artifactId>maven-assembly-plugin</artifactId>
-<configuration>
-<skipAssembly>true</skipAssembly>
-</configuration>
-</plugin>
-<!-- Make a jar and put the sources in the jar -->
-<plugin>
-<groupId>org.apache.maven.plugins</groupId>
-<artifactId>maven-source-plugin</artifactId>
-</plugin>
-<plugin>
-<groupId>net.revelc.code</groupId>
-<artifactId>warbucks-maven-plugin</artifactId>
-</plugin>
-</plugins>
-</build>
 <dependencies>
 <!-- Intra-project dependencies -->
 <dependency>
@@ -173,12 +153,34 @@
 <scope>test</scope>
 </dependency>
 </dependencies>
+<build>
+<plugins>
+<plugin>
+<!--Make it so assembly:single does nothing in here-->
+<artifactId>maven-assembly-plugin</artifactId>
+<configuration>
+<skipAssembly>true</skipAssembly>
+</configuration>
+</plugin>
+<!-- Make a jar and put the sources in the jar -->
+<plugin>
+<groupId>org.apache.maven.plugins</groupId>
+<artifactId>maven-source-plugin</artifactId>
+</plugin>
+<plugin>
+<groupId>net.revelc.code</groupId>
+<artifactId>warbucks-maven-plugin</artifactId>
+</plugin>
+</plugins>
+</build>
 <profiles>
 <!-- Profile for building against Hadoop 3.0.0. Activate by default -->
 <profile>
 <id>hadoop-3.0</id>
 <activation>
-<property><name>!hadoop.profile</name></property>
+<property>
+<name>!hadoop.profile</name>
+</property>
 </activation>
 <dependencies>
 <dependency>
@@ -213,8 +215,7 @@
 <artifactId>lifecycle-mapping</artifactId>
 <configuration>
 <lifecycleMappingMetadata>
-<pluginExecutions>
-</pluginExecutions>
+<pluginExecutions/>
 </lifecycleMappingMetadata>
 </configuration>
 </plugin>
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,13 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupSet;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -30,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience;
  * The administrative API for HBase Backup. Construct an instance and call {@link #close()}
  * afterwards.
  * <p>
- * BackupAdmin can be used to create backups, restore data from backups and for other
- * backup-related operations.
+ * BackupAdmin can be used to create backups, restore data from backups and for other backup-related
+ * operations.
  * @since 2.0
  */
 @InterfaceAudience.Private
@@ -71,9 +69,9 @@ public interface BackupAdmin extends Closeable {
 
   /**
    * Merge backup images command
-   * @param backupIds array of backup ids of images to be merged
-   *          The resulting backup image will have the same backup id as the most
-   *          recent image from a list of images to be merged
+   * @param backupIds array of backup ids of images to be merged The resulting backup image will
+   *                  have the same backup id as the most recent image from a list of images to be
+   *                  merged
    * @throws IOException exception
    */
   void mergeBackups(String[] backupIds) throws IOException;
@@ -120,7 +118,7 @@ public interface BackupAdmin extends Closeable {
 
   /**
    * Add tables to backup set command
   * @param name name of backup set.
   * @param tables array of tables to be added to this set.
   * @throws IOException exception
   */
@@ -128,7 +126,7 @@ public interface BackupAdmin extends Closeable {
 
   /**
    * Remove tables from backup set
   * @param name name of backup set.
   * @param tables array of tables to be removed from this set.
   * @throws IOException exception
   */
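
For context on the interface reformatted above, here is a minimal usage sketch of BackupAdmin#mergeBackups. BackupAdminImpl and its Connection-taking constructor appear in the RestoreDriver hunks further down in this diff; its package name (org.apache.hadoop.hbase.backup.impl) and the two backup ids are assumptions used only for illustration.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; // package name assumed
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MergeBackupsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // BackupAdmin extends Closeable, so try-with-resources closes both resources.
        try (Connection conn = ConnectionFactory.createConnection(conf);
          BackupAdmin admin = new BackupAdminImpl(conn)) {
          // Placeholder ids; per the javadoc above, the merged image keeps the id of the most recent input.
          admin.mergeBackups(new String[] { "backup_1396650096738", "backup_1396650096739" });
        }
      }
    }
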
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,13 +18,11 @@
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
 import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
 import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
 import org.apache.hadoop.hbase.client.Connection;
-
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,11 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
@@ -34,16 +32,16 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface BackupCopyJob extends Configurable {
   /**
    * Copy backup data to destination
    * @param backupInfo context object
    * @param backupManager backup manager
    * @param conf configuration
    * @param backupType backup type (FULL or INCREMENTAL)
    * @param options array of options (implementation-specific)
    * @return result (0 - success, -1 failure )
    * @throws IOException exception
    */
   int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
     BackupType backupType, String[] options) throws IOException;
 
   /**
    * Cancel copy job
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -58,9 +58,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 
 /**
- *
  * Command-line entry point for backup operation
- *
  */
 @InterfaceAudience.Private
 public class BackupDriver extends AbstractHBaseTool {
@@ -23,7 +23,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -54,7 +53,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
   private Connection connection;
   private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
       secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
-  //used by unit test to skip reading backup:system
+  // used by unit test to skip reading backup:system
   private boolean checkForFullyBackedUpTables = true;
   private List<TableName> fullyBackedUpTables = null;
 
@@ -79,8 +78,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
       connection = ConnectionFactory.createConnection(conf);
     }
     try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
-      Map<byte[], List<Path>>[] res =
-          tbl.readBulkLoadedFiles(null, tableList);
+      Map<byte[], List<Path>>[] res = tbl.readBulkLoadedFiles(null, tableList);
       secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
       prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
       return getFilenameFromBulkLoad(res);
@@ -91,6 +89,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
   void setCheckForFullyBackedUpTables(boolean b) {
     checkForFullyBackedUpTables = b;
   }
+
   @Override
   public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
     if (conf == null) {
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
@@ -59,7 +59,10 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * Backup session states
    */
   public enum BackupState {
-    RUNNING, COMPLETE, FAILED, ANY
+    RUNNING,
+    COMPLETE,
+    FAILED,
+    ANY
   }
 
   /**
@@ -67,7 +70,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * BackupState.RUNNING
    */
   public enum BackupPhase {
-    REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST
+    REQUEST,
+    SNAPSHOT,
+    PREPARE_INCREMENTAL,
+    SNAPSHOTCOPY,
+    INCREMENTAL_COPY,
+    STORE_MANIFEST
   }
 
   /**
@@ -137,8 +145,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
   private Map<TableName, Map<String, Long>> tableSetTimestampMap;
 
   /**
-   * Previous Region server log timestamps for table set after distributed log roll key -
-   * table name, value - map of RegionServer hostname -> last log rolled timestamp
+   * Previous Region server log timestamps for table set after distributed log roll key - table
+   * name, value - map of RegionServer hostname -> last log rolled timestamp
    */
   private Map<TableName, Map<String, Long>> incrTimestampMap;
 
@@ -198,8 +206,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
     return tableSetTimestampMap;
   }
 
-  public void setTableSetTimestampMap(Map<TableName,
-      Map<String, Long>> tableSetTimestampMap) {
+  public void setTableSetTimestampMap(Map<TableName, Map<String, Long>> tableSetTimestampMap) {
     this.tableSetTimestampMap = tableSetTimestampMap;
   }
 
@@ -357,8 +364,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * Set the new region server log timestamps after distributed log roll
    * @param prevTableSetTimestampMap table timestamp map
    */
-  public void setIncrTimestampMap(Map<TableName,
-      Map<String, Long>> prevTableSetTimestampMap) {
+  public void setIncrTimestampMap(Map<TableName, Map<String, Long>> prevTableSetTimestampMap) {
     this.incrTimestampMap = prevTableSetTimestampMap;
   }
 
@@ -482,8 +488,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
       context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name()));
     }
 
-    context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(),
-      proto.getBackupId()));
+    context
+      .setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), proto.getBackupId()));
 
     if (proto.hasBackupPhase()) {
       context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name()));
@@ -507,12 +513,12 @@ public class BackupInfo implements Comparable<BackupInfo> {
     return map;
   }
 
-  private static Map<TableName, Map<String, Long>> getTableSetTimestampMap(
-      Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
+  private static Map<TableName, Map<String, Long>>
+    getTableSetTimestampMap(Map<String, BackupProtos.BackupInfo.RSTimestampMap> map) {
     Map<TableName, Map<String, Long>> tableSetTimestampMap = new HashMap<>();
     for (Entry<String, BackupProtos.BackupInfo.RSTimestampMap> entry : map.entrySet()) {
-      tableSetTimestampMap
-        .put(TableName.valueOf(entry.getKey()), entry.getValue().getRsTimestampMap());
+      tableSetTimestampMap.put(TableName.valueOf(entry.getKey()),
+        entry.getValue().getRsTimestampMap());
     }
 
     return tableSetTimestampMap;
@@ -549,7 +555,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   public String getStatusAndProgressAsString() {
     StringBuilder sb = new StringBuilder();
     sb.append("id: ").append(getBackupId()).append(" state: ").append(getState())
       .append(" progress: ").append(getProgress());
     return sb.toString();
   }
 
@@ -567,7 +573,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   @Override
   public int compareTo(BackupInfo o) {
     Long thisTS =
       Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
     Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
     return thisTS.compareTo(otherTS);
   }
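
The compareTo shown above orders BackupInfo instances by the numeric suffix of the backup id, i.e. everything after the last '_' (the timestamp in an id such as "backup_1396650096738"). A small self-contained sketch of that ordering, kept separate from BackupInfo itself since its constructors are not part of this diff:

    public class BackupIdOrderSketch {
      // Mirrors BackupInfo.compareTo: compare the numeric suffix after the last '_'.
      static long suffix(String backupId) {
        return Long.parseLong(backupId.substring(backupId.lastIndexOf("_") + 1));
      }

      public static void main(String[] args) {
        String older = "backup_1396650096738";
        String newer = "backup_1396650096999";
        // Negative result: the older image sorts before the newer one.
        System.out.println(Long.compare(suffix(older), suffix(newer)));
      }
    }
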
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,11 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -32,7 +30,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface BackupMergeJob extends Configurable {
   /**
    * Run backup merge operation.
-   *
    * @param backupIds backup image ids
    * @throws IOException if the backup merge operation fails
    */
@@ -7,14 +7,13 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.hbase.backup;
 
@@ -22,7 +21,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -56,7 +54,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
   @Override
   public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
     List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths)
     throws IOException {
     Configuration cfg = ctx.getEnvironment().getConfiguration();
     if (finalPaths == null) {
       // there is no need to record state
@@ -67,7 +65,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
       return;
     }
     try (Connection connection = ConnectionFactory.createConnection(cfg);
       BackupSystemTable tbl = new BackupSystemTable(connection)) {
       List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
       RegionInfo info = ctx.getEnvironment().getRegionInfo();
       TableName tableName = info.getTable();
@@ -82,16 +80,17 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver {
       LOG.error("Failed to get tables which have been fully backed up", ioe);
     }
   }
 
   @Override
   public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
     final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
     Configuration cfg = ctx.getEnvironment().getConfiguration();
     if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
       LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled");
       return;
     }
     try (Connection connection = ConnectionFactory.createConnection(cfg);
       BackupSystemTable tbl = new BackupSystemTable(connection)) {
       List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
       RegionInfo info = ctx.getEnvironment().getRegionInfo();
       TableName tableName = info.getTable();
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,11 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import java.util.List;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import org.apache.hadoop.hbase.HConstants;
@@ -45,14 +44,14 @@ public interface BackupRestoreConstants {
   int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000;
 
   /*
    * Drivers option list
    */
   String OPTION_OVERWRITE = "o";
   String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists";
 
   String OPTION_CHECK = "c";
   String OPTION_CHECK_DESC =
     "Check restore sequence and dependencies only (does not execute the command)";
 
   String OPTION_SET = "s";
   String OPTION_SET_DESC = "Backup set name";
@@ -62,8 +61,8 @@ public interface BackupRestoreConstants {
   String OPTION_DEBUG_DESC = "Enable debug loggings";
 
   String OPTION_TABLE = "t";
-  String OPTION_TABLE_DESC = "Table name. If specified, only backup images,"
-    + " which contain this table will be listed.";
+  String OPTION_TABLE_DESC =
+    "Table name. If specified, only backup images," + " which contain this table will be listed.";
 
   String OPTION_LIST = "l";
   String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
@@ -84,37 +83,32 @@ public interface BackupRestoreConstants {
   String OPTION_KEEP = "k";
   String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete";
 
-
   String OPTION_TABLE_MAPPING = "m";
-  String OPTION_TABLE_MAPPING_DESC =
-    "A comma separated list of target tables. "
-      + "If specified, each table in <tables> must have a mapping";
+  String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. "
+    + "If specified, each table in <tables> must have a mapping";
   String OPTION_YARN_QUEUE_NAME = "q";
   String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
   String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on";
 
   String JOB_NAME_CONF_KEY = "mapreduce.job.name";
 
-  String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY
-    + "=true\n"
-    + "hbase.master.logcleaner.plugins="
-    +"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
-    + "hbase.procedure.master.classes=YOUR_CLASSES,"
-    +"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
-    + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
-    + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
-    + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
-    + "org.apache.hadoop.hbase.backup.BackupObserver\n"
-    + "and restart the cluster\n"
-    + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
-  String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "+
-    "in hbase-site.xml, set:\n "
-    + BACKUP_CONFIG_STRING;
+  String BACKUP_CONFIG_STRING =
+    BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins="
+      + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+      + "hbase.procedure.master.classes=YOUR_CLASSES,"
+      + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+      + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+      + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+      + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
+      + "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n"
+      + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
+  String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n "
+    + BACKUP_CONFIG_STRING;
 
   String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING;
 
   /*
    * Delimiter in table name list in restore command
    */
   String TABLENAME_DELIMITER_IN_COMMAND = ",";
 
@@ -123,7 +117,24 @@ public interface BackupRestoreConstants {
   String BACKUPID_PREFIX = "backup_";
 
   enum BackupCommand {
-    CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS,
-    SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR
+    CREATE,
+    CANCEL,
+    DELETE,
+    DESCRIBE,
+    HISTORY,
+    STATUS,
+    CONVERT,
+    MERGE,
+    STOP,
+    SHOW,
+    HELP,
+    PROGRESS,
+    SET,
+    SET_ADD,
+    SET_REMOVE,
+    SET_DELETE,
+    SET_DESCRIBE,
+    SET_LIST,
+    REPAIR
   }
 }
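
BACKUP_CONFIG_STRING above spells out the hbase-site.xml settings required before backups work (the backup log cleaner, the master and regionserver log-roll procedure managers, and the BackupObserver region coprocessor), followed by a cluster restart. Below is a minimal sketch of the same settings applied to a Configuration object; the literal key behind BackupRestoreConstants.BACKUP_ENABLE_KEY is not shown in this diff, so "hbase.backup.enable" is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BackupEnableSketch {
      public static Configuration backupEnabledConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.backup.enable", true); // assumed key, see note above
        conf.set("hbase.master.logcleaner.plugins",
          "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
        conf.set("hbase.procedure.master.classes",
          "YOUR_CLASSES,org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
        conf.set("hbase.procedure.regionserver.classes",
          "YOUR_CLASSES,org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
        conf.set("hbase.coprocessor.region.classes",
          "YOUR_CLASSES,org.apache.hadoop.hbase.backup.BackupObserver");
        return conf;
      }
    }
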
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -26,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Factory implementation for backup/restore related jobs
- *
  */
 @InterfaceAudience.Private
 public final class BackupRestoreFactory {
@@ -45,7 +44,7 @@ public final class BackupRestoreFactory {
    */
   public static RestoreJob getRestoreJob(Configuration conf) {
     Class<? extends RestoreJob> cls =
       conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
     RestoreJob service = ReflectionUtils.newInstance(cls, conf);
     service.setConf(conf);
     return service;
@@ -57,9 +56,8 @@ public final class BackupRestoreFactory {
    * @return backup copy job instance
    */
   public static BackupCopyJob getBackupCopyJob(Configuration conf) {
-    Class<? extends BackupCopyJob> cls =
-      conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class,
-        BackupCopyJob.class);
+    Class<? extends BackupCopyJob> cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS,
+      MapReduceBackupCopyJob.class, BackupCopyJob.class);
     BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
     service.setConf(conf);
     return service;
@@ -71,9 +69,8 @@ public final class BackupRestoreFactory {
    * @return backup merge job instance
    */
   public static BackupMergeJob getBackupMergeJob(Configuration conf) {
-    Class<? extends BackupMergeJob> cls =
-      conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class,
-        BackupMergeJob.class);
+    Class<? extends BackupMergeJob> cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS,
+      MapReduceBackupMergeJob.class, BackupMergeJob.class);
     BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
     service.setConf(conf);
     return service;
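
A minimal sketch of the factory methods reformatted above; the defaults named in the comment are the MapReduce implementations passed to conf.getClass in the hunks. The sketch assumes it lives in the same org.apache.hadoop.hbase.backup package as the factory and the job interfaces.

    package org.apache.hadoop.hbase.backup;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FactorySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Each call resolves to the MapReduce-based job unless overridden in the Configuration.
        RestoreJob restore = BackupRestoreFactory.getRestoreJob(conf);
        BackupCopyJob copy = BackupRestoreFactory.getBackupCopyJob(conf);
        BackupMergeJob merge = BackupRestoreFactory.getBackupMergeJob(conf);
        System.out.println(restore.getClass() + ", " + copy.getClass() + ", " + merge.getClass());
      }
    }
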
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 
@@ -29,14 +29,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
  */
 
 @InterfaceAudience.Private
 public class BackupTableInfo {
   /*
    * Table name for backup
    */
   private TableName table;
 
   /*
    * Snapshot name for offline/online snapshot
    */
   private String snapshotName = null;
 
@@ -1,14 +1,13 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
 import java.util.HashMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -52,15 +49,15 @@ public final class HBackupFileSystem {
    * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
    * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
    * @param backupRootDir backup root directory
    * @param backupId backup id
    * @param tableName table name
    * @return backupPath String for the particular table
    */
-  public static String
-      getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
+  public static String getTableBackupDir(String backupRootDir, String backupId,
+    TableName tableName) {
     return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
       + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
       + Path.SEPARATOR;
   }
 
   /**
@@ -75,7 +72,7 @@ public final class HBackupFileSystem {
   /**
    * Get backup tmp directory for backupId
    * @param backupRoot backup root
    * @param backupId backup id
    * @return backup tmp directory path
    */
   public static Path getBackupTmpDirPathForBackupId(String backupRoot, String backupId) {
@@ -83,7 +80,7 @@ public final class HBackupFileSystem {
   }
 
   public static String getTableBackupDataDir(String backupRootDir, String backupId,
     TableName tableName) {
     return getTableBackupDir(backupRootDir, backupId, tableName) + Path.SEPARATOR + "data";
   }
 
@@ -97,8 +94,8 @@ public final class HBackupFileSystem {
    * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
    * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
    * @param backupRootPath backup root path
    * @param tableName table name
    * @param backupId backup Id
    * @return backupPath for the particular table
    */
   public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
@@ -109,12 +106,12 @@ public final class HBackupFileSystem {
    * Given the backup root dir and the backup id, return the log file location for an incremental
    * backup.
    * @param backupRootDir backup root directory
    * @param backupId backup id
    * @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738"
    */
   public static String getLogBackupDir(String backupRootDir, String backupId) {
     return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
       + HConstants.HREGION_LOGDIR_NAME;
   }
 
   public static Path getLogBackupPath(String backupRootDir, String backupId) {
@@ -124,37 +121,35 @@ public final class HBackupFileSystem {
   // TODO we do not keep WAL files anymore
   // Move manifest file to other place
   private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
     throws IOException {
     FileSystem fs = backupRootPath.getFileSystem(conf);
-    Path manifestPath =
-      new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
-        + BackupManifest.MANIFEST_FILE_NAME);
+    Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
+      + BackupManifest.MANIFEST_FILE_NAME);
     if (!fs.exists(manifestPath)) {
-      String errorMsg =
-        "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for "
-          + backupId + ". File " + manifestPath + " does not exists. Did " + backupId
-          + " correspond to previously taken backup ?";
+      String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME
+        + " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId
+        + " correspond to previously taken backup ?";
       throw new IOException(errorMsg);
     }
     return manifestPath;
   }
 
-  public static BackupManifest
-      getManifest(Configuration conf, Path backupRootPath, String backupId) throws IOException {
+  public static BackupManifest getManifest(Configuration conf, Path backupRootPath, String backupId)
+    throws IOException {
     BackupManifest manifest =
       new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId));
     return manifest;
   }
 
   /**
    * Check whether the backup image path and there is manifest file in the path.
    * @param backupManifestMap If all the manifests are found, then they are put into this map
    * @param tableArray the tables involved
    * @throws IOException exception
    */
   public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
     TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId)
     throws IOException {
     for (TableName tableName : tableArray) {
       BackupManifest manifest = getManifest(conf, backupRootPath, backupId);
       backupManifestMap.put(tableName, manifest);
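
A minimal sketch of the directory layout produced by the helpers above, using the example root and backup id from the javadoc; the printed strings follow the concatenation in the method bodies (root/backupId/namespace/table/ and root/backupId/<HConstants.HREGION_LOGDIR_NAME>).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.HBackupFileSystem;

    public class BackupLayoutSketch {
      public static void main(String[] args) {
        String root = "hdfs://backup.hbase.org:9000/user/biadmin/backup";
        String backupId = "backup_1396650096738";
        // .../backup_1396650096738/default/t1_dn/ for a table in the default namespace
        System.out.println(HBackupFileSystem.getTableBackupDir(root, backupId, TableName.valueOf("t1_dn")));
        // .../backup_1396650096738/<HConstants.HREGION_LOGDIR_NAME>
        System.out.println(HBackupFileSystem.getLogBackupDir(root, backupId));
      }
    }
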
@ -1,4 +1,4 @@
|
||||||
/**
|
/*
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* or more contributor license agreements. See the NOTICE file
|
||||||
* distributed with this work for additional information
|
* distributed with this work for additional information
|
||||||
|
@ -59,9 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
|
||||||
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
|
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
|
||||||
* Command-line entry point for restore operation
|
* Command-line entry point for restore operation
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
@InterfaceAudience.Private
|
@InterfaceAudience.Private
|
||||||
public class RestoreDriver extends AbstractHBaseTool {
|
public class RestoreDriver extends AbstractHBaseTool {
|
||||||
|
@ -69,10 +67,10 @@ public class RestoreDriver extends AbstractHBaseTool {
|
||||||
private CommandLine cmd;
|
private CommandLine cmd;
|
||||||
|
|
||||||
private static final String USAGE_STRING =
|
private static final String USAGE_STRING =
|
||||||
"Usage: hbase restore <backup_path> <backup_id> [options]\n"
|
"Usage: hbase restore <backup_path> <backup_id> [options]\n"
|
||||||
+ " backup_path Path to a backup destination root\n"
|
+ " backup_path Path to a backup destination root\n"
|
||||||
+ " backup_id Backup image ID to restore\n"
|
+ " backup_id Backup image ID to restore\n"
|
||||||
+ " table(s) Comma-separated list of tables to restore\n";
|
+ " table(s) Comma-separated list of tables to restore\n";
|
||||||
|
|
||||||
private static final String USAGE_FOOTER = "";
|
private static final String USAGE_FOOTER = "";
|
||||||
|
|
||||||
|
@ -101,19 +99,19 @@ public class RestoreDriver extends AbstractHBaseTool {
|
||||||
boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
|
boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
|
||||||
if (overwrite) {
|
if (overwrite) {
|
||||||
LOG.debug("Found -overwrite option in restore command, "
|
LOG.debug("Found -overwrite option in restore command, "
|
||||||
+ "will overwrite to existing table if any in the restore target");
|
+ "will overwrite to existing table if any in the restore target");
|
||||||
}
|
}
|
||||||
|
|
||||||
 // whether to only check the dependencies, false by default
 boolean check = cmd.hasOption(OPTION_CHECK);
 if (check) {
-LOG.debug("Found -check option in restore command, "
-+ "will check and verify the dependencies");
+LOG.debug(
+"Found -check option in restore command, " + "will check and verify the dependencies");
 }

 if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) {
-System.err.println("Options -s and -t are mutaully exclusive,"+
-" you can not specify both of them.");
+System.err.println(
+"Options -s and -t are mutaully exclusive," + " you can not specify both of them.");
 printToolUsage();
 return -1;
 }
@@ -141,9 +139,9 @@ public class RestoreDriver extends AbstractHBaseTool {
 String backupId = remainArgs[1];
 String tables;
 String tableMapping =
 cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
 try (final Connection conn = ConnectionFactory.createConnection(conf);
 BackupAdmin client = new BackupAdminImpl(conn)) {
 // Check backup set
 if (cmd.hasOption(OPTION_SET)) {
 String setName = cmd.getOptionValue(OPTION_SET);
@@ -155,8 +153,8 @@ public class RestoreDriver extends AbstractHBaseTool {
 return -2;
 }
 if (tables == null) {
-System.out.println("ERROR: Backup set '" + setName
-+ "' is either empty or does not exist");
+System.out
+.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
 printToolUsage();
 return -3;
 }
@@ -167,15 +165,16 @@ public class RestoreDriver extends AbstractHBaseTool {
 TableName[] sTableArray = BackupUtils.parseTableNames(tables);
 TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping);

-if (sTableArray != null && tTableArray != null &&
-(sTableArray.length != tTableArray.length)) {
+if (
+sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)
+) {
 System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
 printToolUsage();
 return -4;
 }

-client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check,
-sTableArray, tTableArray, overwrite));
+client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray,
+tTableArray, overwrite));
 } catch (Exception e) {
 LOG.error("Error while running restore backup", e);
 return -5;
@@ -184,7 +183,7 @@ public class RestoreDriver extends AbstractHBaseTool {
 }

 private String getTablesForSet(Connection conn, String name, Configuration conf)
 throws IOException {
 try (final BackupSystemTable table = new BackupSystemTable(conn)) {
 List<TableName> tables = table.describeBackupSet(name);
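
Note: the restore path above reduces to a short API sequence. A minimal, illustrative sketch follows (not part of this change; the class and method names are taken from the hunks above, while the import locations, the example root dir, backup id and table names are assumed):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupAdmin;
  import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
  import org.apache.hadoop.hbase.backup.util.BackupUtils;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class RestoreSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      boolean check = false;     // only verify backup image dependencies, do not restore
      boolean overwrite = false; // do not overwrite existing tables
      TableName[] from = BackupUtils.parseTableNames("t1,t2");   // tables inside the image (example)
      TableName[] to = BackupUtils.parseTableNames("t1_r,t2_r"); // optional mapping, same length
      try (Connection conn = ConnectionFactory.createConnection(conf);
          BackupAdmin client = new BackupAdminImpl(conn)) {
        // Argument order mirrors the call in RestoreDriver above; values are placeholders.
        client.restore(BackupUtils.createRestoreRequest("hdfs:///backup/root", "backup_123",
          check, from, to, overwrite));
      }
    }
  }
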
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,11 +15,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

 package org.apache.hadoop.hbase.backup;

 import java.io.IOException;

 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
@@ -34,12 +32,12 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface RestoreJob extends Configurable {
 /**
 * Run restore operation
 * @param dirPaths path array of WAL log directories
 * @param fromTables from tables
 * @param toTables to tables
 * @param fullBackupRestore full backup restore
 * @throws IOException if running the job fails
 */
-void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
-boolean fullBackupRestore) throws IOException;
+void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore)
+throws IOException;
 }
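
RestoreJob is the pluggable piece behind that restore call; the re-wrapped signature above keeps the same contract. A skeletal implementation, purely for illustration (the class name and body are hypothetical; only the interface shown above is assumed):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.RestoreJob;

  public class LoggingRestoreJob implements RestoreJob {
    private Configuration conf;

    @Override
    public void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
        boolean fullBackupRestore) throws IOException {
      // A real job replays the given directories into the target tables;
      // this skeleton only records what was requested.
      System.out.println("restore of " + dirPaths.length + " paths, full=" + fullBackupRestore);
    }

    @Override
    public void setConf(Configuration conf) {
      this.conf = conf;
    }

    @Override
    public Configuration getConf() {
      return conf;
    }
  }
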
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -25,7 +25,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;

 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,7 +56,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 public class BackupAdminImpl implements BackupAdmin {
 public final static String CHECK_OK = "Checking backup images: OK";
 public final static String CHECK_FAILED =
 "Checking backup images: Failed. Some dependencies are missing for restore";
 private static final Logger LOG = LoggerFactory.getLogger(BackupAdminImpl.class);

 private final Connection conn;
@@ -107,8 +106,8 @@ public class BackupAdminImpl implements BackupAdmin {
 deleteSessionStarted = true;
 } catch (IOException e) {
 LOG.warn("You can not run delete command while active backup session is in progress. \n"
 + "If there is no active backup session running, run backup repair utility to "
 + "restore \nbackup system integrity.");
 return -1;
 }

@@ -158,7 +157,7 @@ public class BackupAdminImpl implements BackupAdmin {
 BackupSystemTable.deleteSnapshot(conn);
 // We still have record with unfinished delete operation
 LOG.error("Delete operation failed, please run backup repair utility to restore "
 + "backup system integrity", e);
 throw e;
 } else {
 LOG.warn("Delete operation succeeded, there were some errors: ", e);
@@ -177,15 +176,15 @@ public class BackupAdminImpl implements BackupAdmin {
 /**
 * Updates incremental backup set for every backupRoot
 * @param tablesMap map [backupRoot: {@code Set<TableName>}]
 * @param table backup system table
 * @throws IOException if a table operation fails
 */
 private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
 throws IOException {
 for (String backupRoot : tablesMap.keySet()) {
 Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
 Map<TableName, ArrayList<BackupInfo>> tableMap =
 table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
 for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
 if (entry.getValue() == null) {
 // No more backups for a table
@@ -283,10 +282,10 @@ public class BackupAdminImpl implements BackupAdmin {
 }

 private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
 throws IOException {
 List<TableName> tables = info.getTableNames();
-LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables="
-+ info.getTableListAsString());
+LOG.debug(
+"Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString());
 if (tables.contains(tn)) {
 tables.remove(tn);

@@ -306,7 +305,7 @@ public class BackupAdminImpl implements BackupAdmin {
 }

 private List<BackupInfo> getAffectedBackupSessions(BackupInfo backupInfo, TableName tn,
 BackupSystemTable table) throws IOException {
 LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
 long ts = backupInfo.getStartTs();
 List<BackupInfo> list = new ArrayList<>();
@@ -325,7 +324,7 @@ public class BackupAdminImpl implements BackupAdmin {
 list.clear();
 } else {
 LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
 + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
 list.add(info);
 }
 }
@@ -338,7 +337,7 @@ public class BackupAdminImpl implements BackupAdmin {
 * @throws IOException if cleaning up the backup directory fails
 */
 private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
 throws IOException {
 try {
 // clean up the data at target directory
 String targetDir = backupInfo.getBackupRootDir();
@@ -349,9 +348,8 @@ public class BackupAdminImpl implements BackupAdmin {

 FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);

-Path targetDirPath =
-new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
-backupInfo.getBackupId(), table));
+Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
+backupInfo.getBackupId(), table));
 if (outputFs.delete(targetDirPath, true)) {
 LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
 } else {
@@ -359,13 +357,13 @@ public class BackupAdminImpl implements BackupAdmin {
 }
 } catch (IOException e1) {
 LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
 + "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
 throw e1;
 }
 }

 private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
 throws IOException {
 List<BackupInfo> history = table.getBackupHistory();
 for (BackupInfo info : history) {
 List<TableName> tables = info.getTableNames();
@@ -466,7 +464,7 @@ public class BackupAdminImpl implements BackupAdmin {
 public void addToBackupSet(String name, TableName[] tables) throws IOException {
 String[] tableNames = new String[tables.length];
 try (final BackupSystemTable table = new BackupSystemTable(conn);
 final Admin admin = conn.getAdmin()) {
 for (int i = 0; i < tables.length; i++) {
 tableNames[i] = tables[i].getNameAsString();
 if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
@@ -474,8 +472,8 @@ public class BackupAdminImpl implements BackupAdmin {
 }
 }
 table.addToBackupSet(name, tableNames);
-LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
-+ "' backup set");
+LOG.info(
+"Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set");
 }
 }

@@ -484,8 +482,8 @@ public class BackupAdminImpl implements BackupAdmin {
 LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
 try (final BackupSystemTable table = new BackupSystemTable(conn)) {
 table.removeFromBackupSet(name, toStringArray(tables));
-LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name
-+ "' completed.");
+LOG.info(
+"Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed.");
 }
 }

@@ -534,9 +532,9 @@ public class BackupAdminImpl implements BackupAdmin {
 }

 if (incrTableSet.isEmpty()) {
-String msg = "Incremental backup table set contains no tables. "
-+ "You need to run full backup first "
+String msg =
+"Incremental backup table set contains no tables. " + "You need to run full backup first "
 + (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");

 throw new IOException(msg);
 }
@@ -545,7 +543,7 @@ public class BackupAdminImpl implements BackupAdmin {
 if (!tableList.isEmpty()) {
 String extraTables = StringUtils.join(tableList, ",");
 String msg = "Some tables (" + extraTables + ") haven't gone through full backup. "
 + "Perform full backup on " + extraTables + " first, " + "then retry the command";
 throw new IOException(msg);
 }
 }
@@ -554,13 +552,13 @@ public class BackupAdminImpl implements BackupAdmin {
 if (tableList != null && !tableList.isEmpty()) {
 for (TableName table : tableList) {
 String targetTableBackupDir =
 HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
 Path targetTableBackupDirPath = new Path(targetTableBackupDir);
 FileSystem outputFs =
 FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
 if (outputFs.exists(targetTableBackupDirPath)) {
-throw new IOException("Target backup directory " + targetTableBackupDir
-+ " exists already.");
+throw new IOException(
+"Target backup directory " + targetTableBackupDir + " exists already.");
 }
 outputFs.mkdirs(targetTableBackupDirPath);
 }
@@ -581,8 +579,8 @@ public class BackupAdminImpl implements BackupAdmin {
 tableList = excludeNonExistingTables(tableList, nonExistingTableList);
 } else {
 // Throw exception only in full mode - we try to backup non-existing table
-throw new IOException("Non-existing tables found in the table list: "
-+ nonExistingTableList);
+throw new IOException(
+"Non-existing tables found in the table list: " + nonExistingTableList);
 }
 }
 }
@@ -590,9 +588,9 @@ public class BackupAdminImpl implements BackupAdmin {
 // update table list
 BackupRequest.Builder builder = new BackupRequest.Builder();
 request = builder.withBackupType(request.getBackupType()).withTableList(tableList)
-.withTargetRootDir(request.getTargetRootDir())
-.withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks())
-.withBandwidthPerTasks((int) request.getBandwidth()).build();
+.withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName())
+.withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth())
+.build();

 TableBackupClient client;
 try {
@@ -608,7 +606,7 @@ public class BackupAdminImpl implements BackupAdmin {
 }

 private List<TableName> excludeNonExistingTables(List<TableName> tableList,
 List<TableName> nonExistingTableList) {
 for (TableName table : nonExistingTableList) {
 tableList.remove(table);
 }
@@ -619,7 +617,7 @@ public class BackupAdminImpl implements BackupAdmin {
 public void mergeBackups(String[] backupIds) throws IOException {
 try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
 checkIfValidForMerge(backupIds, sysTable);
-//TODO run job on remote cluster
+// TODO run job on remote cluster
 BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
 job.run(backupIds);
 }
@@ -627,7 +625,6 @@ public class BackupAdminImpl implements BackupAdmin {

 /**
 * Verifies that backup images are valid for merge.
-*
 * <ul>
 * <li>All backups MUST be in the same destination
 * <li>No FULL backups are allowed - only INCREMENTAL
@@ -636,11 +633,11 @@ public class BackupAdminImpl implements BackupAdmin {
 * </ul>
 * <p>
 * @param backupIds list of backup ids
 * @param table backup system table
 * @throws IOException if the backup image is not valid for merge
 */
 private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table)
 throws IOException {
 String backupRoot = null;

 final Set<TableName> allTables = new HashSet<>();
@@ -656,7 +653,7 @@ public class BackupAdminImpl implements BackupAdmin {
 backupRoot = bInfo.getBackupRootDir();
 } else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
 throw new IOException("Found different backup destinations in a list of a backup sessions "
 + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
 }
 if (bInfo.getType() == BackupType.FULL) {
 throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
@@ -664,7 +661,7 @@ public class BackupAdminImpl implements BackupAdmin {

 if (bInfo.getState() != BackupState.COMPLETE) {
 throw new IOException("Backup image " + backupId
 + " can not be merged becuase of its state: " + bInfo.getState());
 }
 allBackups.add(backupId);
 allTables.addAll(bInfo.getTableNames());
@@ -677,7 +674,7 @@ public class BackupAdminImpl implements BackupAdmin {
 }
 }

 final long startRangeTime = minTime;
 final long endRangeTime = maxTime;
 final String backupDest = backupRoot;
 // Check we have no 'holes' in backup id list
@@ -688,7 +685,7 @@ public class BackupAdminImpl implements BackupAdmin {

 BackupInfo.Filter timeRangeFilter = info -> {
 long time = info.getStartTs();
-return time >= startRangeTime && time <= endRangeTime ;
+return time >= startRangeTime && time <= endRangeTime;
 };

 BackupInfo.Filter tableFilter = info -> {
@@ -699,20 +696,20 @@ public class BackupAdminImpl implements BackupAdmin {
 BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
 BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;

-List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter,
-timeRangeFilter, tableFilter, typeFilter, stateFilter);
+List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter,
+tableFilter, typeFilter, stateFilter);
 if (allInfos.size() != allBackups.size()) {
 // Yes we have at least one hole in backup image sequence
 List<String> missingIds = new ArrayList<>();
-for(BackupInfo info: allInfos) {
-if(allBackups.contains(info.getBackupId())) {
+for (BackupInfo info : allInfos) {
+if (allBackups.contains(info.getBackupId())) {
 continue;
 }
 missingIds.add(info.getBackupId());
 }
 String errMsg =
-"Sequence of backup ids has 'holes'. The following backup images must be added:" +
-org.apache.hadoop.util.StringUtils.join(",", missingIds);
+"Sequence of backup ids has 'holes'. The following backup images must be added:"
++ org.apache.hadoop.util.StringUtils.join(",", missingIds);
 throw new IOException(errMsg);
 }
 }
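
The builder chain re-wrapped in the last hunk is also how a backup request is normally assembled before being handed to BackupAdminImpl.backupTables (see the BackupCommands hunks below). A hedged sketch only — the method names come from the hunks, while the import locations, task count, bandwidth and table list are placeholder assumptions:

  import java.util.List;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupRequest;
  import org.apache.hadoop.hbase.backup.BackupType;

  public final class BackupRequestSketch {
    static BackupRequest fullBackupOf(String targetRootDir, List<TableName> tables) {
      // Type, table list, destination, parallelism and bandwidth are all set before build(),
      // mirroring the chain shown in BackupAdminImpl and BackupCommands.
      return new BackupRequest.Builder().withBackupType(BackupType.FULL).withTableList(tables)
        .withTargetRootDir(targetRootDir).withTotalTasks(3).withBandwidthPerTasks(100).build();
    }
  }
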
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

 package org.apache.hadoop.hbase.backup.impl;

 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
@@ -44,7 +43,6 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_
 import java.io.IOException;
 import java.net.URI;
 import java.util.List;

 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -80,33 +78,32 @@ public final class BackupCommands {
 public final static String INCORRECT_USAGE = "Incorrect usage";

 public final static String TOP_LEVEL_NOT_ALLOWED =
 "Top level (root) folder is not allowed to be a backup destination";

 public static final String USAGE = "Usage: hbase backup COMMAND [command-specific arguments]\n"
 + "where COMMAND is one of:\n" + " create create a new backup image\n"
 + " delete delete an existing backup image\n"
 + " describe show the detailed information of a backup image\n"
 + " history show history of all successful backups\n"
 + " progress show the progress of the latest backup request\n"
-+ " set backup set management\n"
-+ " repair repair backup system table\n"
-+ " merge merge backup images\n"
-+ "Run \'hbase backup COMMAND -h\' to see help message for each command\n";
++ " set backup set management\n" + " repair repair backup system table\n"
++ " merge merge backup images\n"
++ "Run \'hbase backup COMMAND -h\' to see help message for each command\n";

 public static final String CREATE_CMD_USAGE =
 "Usage: hbase backup create <type> <backup_path> [options]\n"
 + " type \"full\" to create a full backup image\n"
 + " \"incremental\" to create an incremental backup image\n"
 + " backup_path Full path to store the backup image\n";

 public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress <backup_id>\n"
 + " backup_id Backup image id (optional). If no id specified, the command will show\n"
 + " progress for currently running backup session.";
 public static final String NO_INFO_FOUND = "No info was found for backup id: ";
 public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found.";

-public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe <backup_id>\n"
-+ " backup_id Backup image id\n";
+public static final String DESCRIBE_CMD_USAGE =
+"Usage: hbase backup describe <backup_id>\n" + " backup_id Backup image id\n";

 public static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [options]";

@@ -115,14 +112,13 @@ public final class BackupCommands {
 public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n";

 public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
-+ " name Backup set name\n"
-+ " tables Comma separated list of tables.\n" + "COMMAND is one of:\n"
-+ " add add tables to a set, create a set if needed\n"
-+ " remove remove tables from a set\n"
-+ " list list all backup sets in the system\n"
-+ " describe describe set\n" + " delete delete backup set\n";
++ " name Backup set name\n" + " tables Comma separated list of tables.\n"
++ "COMMAND is one of:\n" + " add add tables to a set, create a set if needed\n"
++ " remove remove tables from a set\n"
++ " list list all backup sets in the system\n" + " describe describe set\n"
++ " delete delete backup set\n";
 public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n"
 + " backup_ids Comma separated list of backup image ids.\n";

 public static final String USAGE_FOOTER = "";

@@ -281,8 +277,10 @@ public final class BackupCommands {
 throw new IOException(INCORRECT_USAGE);
 }

-if (!BackupType.FULL.toString().equalsIgnoreCase(args[1])
-&& !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) {
+if (
+!BackupType.FULL.toString().equalsIgnoreCase(args[1])
+&& !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])
+) {
 System.out.println("ERROR: invalid backup type: " + args[1]);
 printUsage();
 throw new IOException(INCORRECT_USAGE);
@@ -301,8 +299,8 @@ public final class BackupCommands {

 // Check if we have both: backup set and list of tables
 if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) {
-System.out.println("ERROR: You can specify either backup set or list"
-+ " of tables, but not both");
+System.out
+.println("ERROR: You can specify either backup set or list" + " of tables, but not both");
 printUsage();
 throw new IOException(INCORRECT_USAGE);
 }
@@ -315,20 +313,20 @@ public final class BackupCommands {
 tables = getTablesForSet(setName, getConf());

 if (tables == null) {
-System.out.println("ERROR: Backup set '" + setName
-+ "' is either empty or does not exist");
+System.out
+.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
 printUsage();
 throw new IOException(INCORRECT_USAGE);
 }
 } else {
 tables = cmdline.getOptionValue(OPTION_TABLE);
 }
-int bandwidth =
-cmdline.hasOption(OPTION_BANDWIDTH) ? Integer.parseInt(cmdline
-.getOptionValue(OPTION_BANDWIDTH)) : -1;
-int workers =
-cmdline.hasOption(OPTION_WORKERS) ? Integer.parseInt(cmdline
-.getOptionValue(OPTION_WORKERS)) : -1;
+int bandwidth = cmdline.hasOption(OPTION_BANDWIDTH)
+? Integer.parseInt(cmdline.getOptionValue(OPTION_BANDWIDTH))
+: -1;
+int workers = cmdline.hasOption(OPTION_WORKERS)
+? Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS))
+: -1;

 if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) {
 String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME);
@@ -338,13 +336,11 @@ public final class BackupCommands {

 try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
 BackupRequest.Builder builder = new BackupRequest.Builder();
-BackupRequest request =
-builder
-.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
-.withTableList(
-tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
-.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
-.withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
+BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase()))
+.withTableList(
+tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
+.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
+.withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();

 String backupId = admin.backupTables(request);
 System.out.println("Backup session " + backupId + " finished. Status: SUCCESS");
 } catch (IOException e) {
@@ -506,8 +502,8 @@ public final class BackupCommands {
 public void execute() throws IOException {

 if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) {
-System.out.println("No backup id was specified, "
-+ "will retrieve the most recent (ongoing) session");
+System.out.println(
+"No backup id was specified, " + "will retrieve the most recent (ongoing) session");
 }
 String[] args = cmdline == null ? null : cmdline.getArgs();
 if (args != null && args.length > 2) {
@@ -601,15 +597,15 @@ public final class BackupCommands {
 };
 List<BackupInfo> history = null;
 try (final BackupSystemTable sysTable = new BackupSystemTable(conn);
 BackupAdminImpl admin = new BackupAdminImpl(conn)) {
 history = sysTable.getBackupHistory(-1, dateFilter);
 String[] backupIds = convertToBackupIds(history);
 int deleted = admin.deleteBackups(backupIds);
 System.out.println("Deleted " + deleted + " backups. Total older than " + days + " days: "
 + backupIds.length);
 } catch (IOException e) {
 System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
 + "system integrity");
 throw e;
 }
 }
@@ -631,7 +627,7 @@ public final class BackupCommands {
 System.out.println("Deleted " + deleted + " backups. Total requested: " + backupIds.length);
 } catch (IOException e) {
 System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
 + "system integrity");
 throw e;
 }

@@ -673,14 +669,14 @@ public final class BackupCommands {

 Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
 try (final Connection conn = ConnectionFactory.createConnection(conf);
 final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
 // Failed backup
 BackupInfo backupInfo;
 List<BackupInfo> list = sysTable.getBackupInfos(BackupState.RUNNING);
 if (list.size() == 0) {
 // No failed sessions found
 System.out.println("REPAIR status: no failed sessions found."
 + " Checking failed delete backup operation ...");
 repairFailedBackupDeletionIfAny(conn, sysTable);
 repairFailedBackupMergeIfAny(conn, sysTable);
 return;
@@ -694,10 +690,9 @@ public final class BackupCommands {
 // set overall backup status: failed
 backupInfo.setState(BackupState.FAILED);
 // compose the backup failed data
-String backupFailedData =
-"BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs()
-+ ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
-+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
+String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
++ backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
++ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
 System.out.println(backupFailedData);
 TableBackupClient.cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
 // If backup session is updated to FAILED state - means we
@@ -709,7 +704,7 @@ public final class BackupCommands {
 }

 private void repairFailedBackupDeletionIfAny(Connection conn, BackupSystemTable sysTable)
 throws IOException {
 String[] backupIds = sysTable.getListOfBackupIdsFromDeleteOperation();
 if (backupIds == null || backupIds.length == 0) {
 System.out.println("No failed backup DELETE operation found");
@@ -730,7 +725,7 @@ public final class BackupCommands {
 }

 public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTable sysTable)
 throws IOException {

 String[] backupIds = sysTable.getListOfBackupIdsFromMergeOperation();
 if (backupIds == null || backupIds.length == 0) {
@@ -754,9 +749,11 @@ public final class BackupCommands {
 }
 boolean res = fs.rename(tmpPath, destPath);
 if (!res) {
-throw new IOException("MERGE repair: failed to rename from "+ tmpPath+" to "+ destPath);
+throw new IOException(
+"MERGE repair: failed to rename from " + tmpPath + " to " + destPath);
 }
-System.out.println("MERGE repair: renamed from "+ tmpPath+" to "+ destPath+" res="+ res);
+System.out
+.println("MERGE repair: renamed from " + tmpPath + " to " + destPath + " res=" + res);
 } else {
 checkRemoveBackupImages(fs, backupRoot, backupIds);
 }
@@ -773,16 +770,16 @@ public final class BackupCommands {
 private static void checkRemoveBackupImages(FileSystem fs, String backupRoot,
 String[] backupIds) throws IOException {
 String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);
-for (String backupId: backupIds) {
+for (String backupId : backupIds) {
 if (backupId.equals(mergedBackupId)) {
 continue;
 }
 Path path = HBackupFileSystem.getBackupPath(backupRoot, backupId);
 if (fs.exists(path)) {
 if (!fs.delete(path, true)) {
-System.out.println("MERGE repair removing: "+ path +" - FAILED");
+System.out.println("MERGE repair removing: " + path + " - FAILED");
 } else {
-System.out.println("MERGE repair removing: "+ path +" - OK");
+System.out.println("MERGE repair removing: " + path + " - OK");
 }
 }
 }
@@ -816,23 +813,23 @@ public final class BackupCommands {

 String[] args = cmdline == null ? null : cmdline.getArgs();
 if (args == null || (args.length != 2)) {
-System.err.println("ERROR: wrong number of arguments: "
-+ (args == null ? null : args.length));
+System.err
+.println("ERROR: wrong number of arguments: " + (args == null ? null : args.length));
 printUsage();
 throw new IOException(INCORRECT_USAGE);
 }

 String[] backupIds = args[1].split(",");
 if (backupIds.length < 2) {
-String msg = "ERROR: can not merge a single backup image. "+
-"Number of images must be greater than 1.";
+String msg = "ERROR: can not merge a single backup image. "
++ "Number of images must be greater than 1.";
 System.err.println(msg);
 throw new IOException(msg);

 }
 Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
 try (final Connection conn = ConnectionFactory.createConnection(conf);
 final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
 admin.mergeBackups(backupIds);
 }
 }
@@ -889,7 +886,7 @@ public final class BackupCommands {
 } else {
 // load from backup FS
 history =
 BackupUtils.getHistory(getConf(), n, backupRootPath, tableNameFilter, tableSetFilter);
 }
 for (BackupInfo info : history) {
 System.out.println(info.getShortDescription());
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

 package org.apache.hadoop.hbase.backup.impl;

 import org.apache.hadoop.hbase.HBaseIOException;
@@ -48,7 +47,7 @@ public class BackupException extends HBaseIOException {

 /**
 * Exception for the given backup that has no previous root cause
 * @param msg reason why the backup failed
 * @param desc description of the backup that is being failed
 */
 public BackupException(String msg, BackupInfo desc) {
@@ -58,9 +57,9 @@ public class BackupException extends HBaseIOException {

 /**
 * Exception for the given backup due to another exception
 * @param msg reason why the backup failed
 * @param cause root cause of the failure
 * @param desc description of the backup that is being failed
 */
 public BackupException(String msg, Throwable cause, BackupInfo desc) {
 super(msg, cause);
@@ -68,10 +67,9 @@ public class BackupException extends HBaseIOException {
 }

 /**
-* Exception when the description of the backup cannot be determined, due to some other root
-* cause
+* Exception when the description of the backup cannot be determined, due to some other root cause
 * @param message description of what caused the failure
 * @param e root cause
 */
 public BackupException(String message, Exception e) {
 super(message, e);
@@ -1,5 +1,4 @@
-/**
-*
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -60,7 +59,7 @@ import org.slf4j.LoggerFactory;
 public class BackupManager implements Closeable {
 // in seconds
 public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY =
 "hbase.backup.exclusive.op.timeout.seconds";
 // In seconds
 private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600;
 private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class);
@@ -77,10 +76,12 @@ public class BackupManager implements Closeable {
 * @throws IOException exception
 */
 public BackupManager(Connection conn, Configuration conf) throws IOException {
-if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
-BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) {
+if (
+!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
+BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)
+) {
 throw new BackupException("HBase backup is not enabled. Check your "
 + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting.");
 }
 this.conf = conf;
 this.conn = conn;
@@ -120,12 +121,13 @@ public class BackupManager implements Closeable {
 }

 plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
-conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") +
-BackupHFileCleaner.class.getName());
+conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+(plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
 if (LOG.isDebugEnabled()) {
-LOG.debug("Added log cleaner: {}. Added master procedure manager: {}."
-+"Added master procedure manager: {}", cleanerClass, masterProcedureClass,
-BackupHFileCleaner.class.getName());
+LOG.debug(
+"Added log cleaner: {}. Added master procedure manager: {}."
++ "Added master procedure manager: {}",
+cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
 }
 }

@@ -163,8 +165,7 @@ public class BackupManager implements Closeable {
 }

 /**
-* Get configuration
-* @return configuration
+* Get configuration n
 */
 Configuration getConf() {
 return conf;
@@ -186,17 +187,15 @@ public class BackupManager implements Closeable {

 /**
 * Creates a backup info based on input backup request.
 * @param backupId backup id
 * @param type type
 * @param tableList table list
 * @param targetRootDir root dir
 * @param workers number of parallel workers
-* @param bandwidth bandwidth per worker in MB per sec
-* @return BackupInfo
-* @throws BackupException exception
+* @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception
 */
 public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
 String targetRootDir, int workers, long bandwidth) throws BackupException {
 if (targetRootDir == null) {
 throw new BackupException("Wrong backup request parameter: target backup root directory");
 }
@@ -292,8 +291,8 @@ public class BackupManager implements Closeable {
 BackupImage.Builder builder = BackupImage.newBuilder();

 BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
 .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
 .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();

 // Only direct ancestors for a backup are required and not entire history of backup for this
 // table resulting in verifying all of the previous backups which is unnecessary and backup
@@ -320,21 +319,21 @@ public class BackupManager implements Closeable {
 if (BackupManifest.canCoverImage(ancestors, image)) {
 LOG.debug("Met the backup boundary of the current table set:");
 for (BackupImage image1 : ancestors) {
 LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir());
 }
 } else {
 Path logBackupPath =
 HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId());
-LOG.debug("Current backup has an incremental backup ancestor, "
-+ "touching its image manifest in {}"
-+ " to construct the dependency.", logBackupPath.toString());
+LOG.debug(
+"Current backup has an incremental backup ancestor, "
++ "touching its image manifest in {}" + " to construct the dependency.",
+logBackupPath.toString());
 BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
 BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
|
BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
|
||||||
ancestors.add(lastIncrImage);
|
ancestors.add(lastIncrImage);
|
||||||
|
|
||||||
LOG.debug(
|
LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}",
|
||||||
"Last dependent incremental backup image: {BackupID={}" +
|
lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
|
||||||
"BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -345,12 +344,12 @@ public class BackupManager implements Closeable {
|
||||||
/**
|
/**
|
||||||
* Get the direct ancestors of this backup for one table involved.
|
* Get the direct ancestors of this backup for one table involved.
|
||||||
* @param backupInfo backup info
|
* @param backupInfo backup info
|
||||||
* @param table table
|
* @param table table
|
||||||
* @return backupImages on the dependency list
|
* @return backupImages on the dependency list
|
||||||
* @throws IOException exception
|
* @throws IOException exception
|
||||||
*/
|
*/
|
||||||
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
|
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
|
ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
|
||||||
ArrayList<BackupImage> tableAncestors = new ArrayList<>();
|
ArrayList<BackupImage> tableAncestors = new ArrayList<>();
|
||||||
for (BackupImage image : ancestors) {
|
for (BackupImage image : ancestors) {
|
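Note on the method above: the loop body of getAncestors(BackupInfo, TableName) is cut off in this diff. A minimal sketch of the per-table filtering it performs, under the assumption (not confirmed by the excerpt) that BackupImage exposes the tables it covers via getTableNames():

    // Illustrative sketch only; getTableNames()/contains() is an assumed accessor, not shown in the diff.
    ArrayList<BackupImage> tableAncestors = new ArrayList<>();
    for (BackupImage image : ancestors) {
      if (image.getTableNames().contains(table)) {
        tableAncestors.add(image);
      }
    }
    return tableAncestors;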
@@ -399,11 +398,13 @@ public class BackupManager implements Closeable {
  // Restore the interrupted status
  Thread.currentThread().interrupt();
  }
- if (lastWarningOutputTime == 0
- || (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) {
+ if (
+ lastWarningOutputTime == 0
+ || (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000
+ ) {
  lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
  LOG.warn("Waiting to acquire backup exclusive lock for {}s",
  +(lastWarningOutputTime - startTime) / 1000);
  }
  } else {
  throw e;

@@ -480,8 +481,8 @@ public class BackupManager implements Closeable {
  * @param tables tables
  * @throws IOException exception
  */
- public void writeRegionServerLogTimestamp(Set<TableName> tables,
- Map<String, Long> newTimestamps) throws IOException {
+ public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps)
+ throws IOException {
  systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir());
  }
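Aside on the @@ -399,11 +398,13 @@ hunk above: the loop throttles its "waiting for the backup exclusive lock" warning to at most one message per minute. A self-contained sketch of that throttling pattern, using System.currentTimeMillis() in place of HBase's EnvironmentEdgeManager clock and a hypothetical tryAcquireLock supplier:

    // Sketch: warn at most once every 60 s while retrying an acquire attempt.
    static void waitForLock(java.util.function.BooleanSupplier tryAcquireLock) throws InterruptedException {
      long startTime = System.currentTimeMillis();
      long lastWarningOutputTime = 0;
      while (!tryAcquireLock.getAsBoolean()) {
        Thread.sleep(1000);
        if (lastWarningOutputTime == 0
          || (System.currentTimeMillis() - lastWarningOutputTime) > 60000) {
          lastWarningOutputTime = System.currentTimeMillis();
          System.out.printf("Waiting to acquire backup exclusive lock for %ds%n",
            (lastWarningOutputTime - startTime) / 1000);
        }
      }
    }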
Changes to BackupManifest.java:

@@ -1,4 +1,4 @@
- /**
+ /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information

@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */

  package org.apache.hadoop.hbase.backup.impl;

  import java.io.IOException;

@@ -26,7 +25,6 @@ import java.util.List;
  import java.util.Map;
  import java.util.Map.Entry;
  import java.util.TreeMap;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FSDataOutputStream;

@@ -50,9 +48,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
  /**
  * Backup manifest contains all the meta data of a backup image. The manifest info will be bundled
  * as manifest file together with data. So that each backup image will contain all the info needed
- * for restore. BackupManifest is a storage container for BackupImage.
- * It is responsible for storing/reading backup image data and has some additional utility methods.
- *
+ * for restore. BackupManifest is a storage container for BackupImage. It is responsible for
+ * storing/reading backup image data and has some additional utility methods.
  */
  @InterfaceAudience.Private
  public class BackupManifest {

@@ -126,8 +123,8 @@ public class BackupManifest {
  super();
  }

- private BackupImage(String backupId, BackupType type, String rootDir,
- List<TableName> tableList, long startTs, long completeTs) {
+ private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList,
+ long startTs, long completeTs) {
  this.backupId = backupId;
  this.type = type;
  this.rootDir = rootDir;

@@ -149,9 +146,9 @@ public class BackupManifest {

  List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();

- BackupType type =
- im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL
+ BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL
+ ? BackupType.FULL
  : BackupType.INCREMENTAL;

  BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
  for (BackupProtos.BackupImage img : ancestorList) {

@@ -187,8 +184,8 @@ public class BackupManifest {
  return builder.build();
  }

- private static Map<TableName, Map<String, Long>> loadIncrementalTimestampMap(
- BackupProtos.BackupImage proto) {
+ private static Map<TableName, Map<String, Long>>
+ loadIncrementalTimestampMap(BackupProtos.BackupImage proto) {
  List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList();

  Map<TableName, Map<String, Long>> incrTimeRanges = new HashMap<>();

@@ -221,13 +218,13 @@ public class BackupManifest {
  TableName key = entry.getKey();
  Map<String, Long> value = entry.getValue();
  BackupProtos.TableServerTimestamp.Builder tstBuilder =
  BackupProtos.TableServerTimestamp.newBuilder();
  tstBuilder.setTableName(ProtobufUtil.toProtoTableName(key));

  for (Map.Entry<String, Long> entry2 : value.entrySet()) {
  String s = entry2.getKey();
  BackupProtos.ServerTimestamp.Builder stBuilder =
  BackupProtos.ServerTimestamp.newBuilder();
  HBaseProtos.ServerName.Builder snBuilder = HBaseProtos.ServerName.newBuilder();
  ServerName sn = ServerName.parseServerName(s);
  snBuilder.setHostName(sn.getHostname());

@@ -378,10 +375,9 @@ public class BackupManifest {
  */
  public BackupManifest(BackupInfo backup) {
  BackupImage.Builder builder = BackupImage.newBuilder();
- this.backupImage =
- builder.withBackupId(backup.getBackupId()).withType(backup.getType())
- .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
- .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
+ this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
+ .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
+ .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
  }

  /**

@@ -393,16 +389,14 @@ public class BackupManifest {
  List<TableName> tables = new ArrayList<TableName>();
  tables.add(table);
  BackupImage.Builder builder = BackupImage.newBuilder();
- this.backupImage =
- builder.withBackupId(backup.getBackupId()).withType(backup.getType())
- .withRootDir(backup.getBackupRootDir()).withTableList(tables)
- .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
+ this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
+ .withRootDir(backup.getBackupRootDir()).withTableList(tables)
+ .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();
  }

  /**
  * Construct manifest from a backup directory.
- *
- * @param conf configuration
+ * @param conf configuration
  * @param backupPath backup path
  * @throws IOException if constructing the manifest from the backup directory fails
  */

@@ -412,7 +406,7 @@ public class BackupManifest {

  /**
  * Construct manifest from a backup directory.
  * @param fs the FileSystem
  * @param backupPath backup path
  * @throws BackupException exception
  */

@@ -449,7 +443,7 @@ public class BackupManifest {
  }
  this.backupImage = BackupImage.fromProto(proto);
  LOG.debug("Loaded manifest instance from manifest file: "
  + BackupUtils.getPath(subFile.getPath()));
  return;
  }
  }

@@ -480,10 +474,10 @@ public class BackupManifest {
  byte[] data = backupImage.toProto().toByteArray();
  // write the file, overwrite if already exist
  Path manifestFilePath =
- new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(),
- backupImage.getBackupId()), MANIFEST_FILE_NAME);
+ new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()),
+ MANIFEST_FILE_NAME);
  try (FSDataOutputStream out =
  manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
  out.write(data);
  } catch (IOException e) {
  throw new BackupException(e.getMessage());

@@ -531,8 +525,8 @@ public class BackupManifest {
  for (BackupImage image : backupImage.getAncestors()) {
  restoreImages.put(Long.valueOf(image.startTs), image);
  }
- return new ArrayList<>(reverse ? (restoreImages.descendingMap().values())
- : (restoreImages.values()));
+ return new ArrayList<>(
+ reverse ? (restoreImages.descendingMap().values()) : (restoreImages.values()));
  }

  /**

@@ -614,7 +608,7 @@ public class BackupManifest {
  /**
  * Check whether backup image set could cover a backup image or not.
  * @param fullImages The backup image set
  * @param image The target backup image
  * @return true if fullImages can cover image, otherwise false
  */
  public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {

@@ -664,8 +658,8 @@ public class BackupManifest {
  info.setStartTs(backupImage.getStartTs());
  info.setBackupRootDir(backupImage.getRootDir());
  if (backupImage.getType() == BackupType.INCREMENTAL) {
- info.setHLogTargetDir(BackupUtils.getLogBackupDir(backupImage.getRootDir(),
- backupImage.getBackupId()));
+ info.setHLogTargetDir(
+ BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId()));
  }
  return info;
  }
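Usage note for the constructors above: both funnel a BackupInfo into a single BackupImage through the builder chain shown in the diff, rather than calling the long private constructor directly. The chain, exactly as it appears in the new code (backup is a BackupInfo):

    BackupImage.Builder builder = BackupImage.newBuilder();
    BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType())
      .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames())
      .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build();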
Changes to BackupSystemTable.java:

@@ -1,5 +1,4 @@
- /**
- *
+ /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information

@@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.util.Pair;
  import org.apache.yetus.audience.InterfaceAudience;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

@@ -232,7 +232,7 @@ public final class BackupSystemTable implements Closeable {
  long TIMEOUT = 60000;
  long startTime = EnvironmentEdgeManager.currentTime();
  LOG.debug("Backup table {} is not present and available, waiting for it to become so",
  tableName);
  while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
  try {
  Thread.sleep(100);

@@ -274,15 +274,17 @@ public final class BackupSystemTable implements Closeable {
  Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
  Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
  try (Table table = connection.getTable(bulkLoadTableName);
  ResultScanner scanner = table.getScanner(scan)) {
  Result res = null;
  Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  while ((res = scanner.next()) != null) {
  res.advance();
  byte[] row = CellUtil.cloneRow(res.listCells().get(0));
  for (Cell cell : res.listCells()) {
- if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
- BackupSystemTable.PATH_COL.length) == 0) {
+ if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
+ BackupSystemTable.PATH_COL.length) == 0
+ ) {
  map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
  }
  }

@@ -298,11 +300,11 @@ public final class BackupSystemTable implements Closeable {
  * @return array of Map of family to List of Paths
  */
  public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
  throws IOException {
  Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
  Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
  try (Table table = connection.getTable(bulkLoadTableName);
  ResultScanner scanner = table.getScanner(scan)) {
  Result res = null;
  while ((res = scanner.next()) != null) {
  res.advance();

@@ -310,14 +312,20 @@ public final class BackupSystemTable implements Closeable {
  byte[] fam = null;
  String path = null;
  for (Cell cell : res.listCells()) {
- if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
- BackupSystemTable.TBL_COL.length) == 0) {
+ if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
+ BackupSystemTable.TBL_COL.length) == 0
+ ) {
  tbl = TableName.valueOf(CellUtil.cloneValue(cell));
- } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
- BackupSystemTable.FAM_COL.length) == 0) {
+ } else if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
+ BackupSystemTable.FAM_COL.length) == 0
+ ) {
  fam = CellUtil.cloneValue(cell);
- } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
- BackupSystemTable.PATH_COL.length) == 0) {
+ } else if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
+ BackupSystemTable.PATH_COL.length) == 0
+ ) {
  path = Bytes.toString(CellUtil.cloneValue(cell));
  }
  }

@@ -368,7 +376,7 @@ public final class BackupSystemTable implements Closeable {
  * @param finalPaths family and associated hfiles
  */
  public void writePathsPostBulkLoad(TableName tabName, byte[] region,
  Map<byte[], List<Path>> finalPaths) throws IOException {
  if (LOG.isDebugEnabled()) {
  LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size()
  + " entries");

@@ -388,14 +396,14 @@ public final class BackupSystemTable implements Closeable {
  * @param pairs list of paths for hfiles
  */
  public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family,
  final List<Pair<Path, Path>> pairs) throws IOException {
  if (LOG.isDebugEnabled()) {
  LOG.debug(
  "write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries");
  }
  try (Table table = connection.getTable(bulkLoadTableName)) {
  List<Put> puts =
  BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs);
  table.put(puts);
  LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
  }

@@ -434,7 +442,7 @@ public final class BackupSystemTable implements Closeable {
  Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable);
  Map<String, Map<String, List<Pair<String, Boolean>>>> tblMap = map.get(tTable);
  try (Table table = connection.getTable(bulkLoadTableName);
  ResultScanner scanner = table.getScanner(scan)) {
  Result res = null;
  while ((res = scanner.next()) != null) {
  res.advance();

@@ -448,14 +456,20 @@ public final class BackupSystemTable implements Closeable {
  rows.add(row);
  String rowStr = Bytes.toString(row);
  region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
- if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
- BackupSystemTable.FAM_COL.length) == 0) {
+ if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
+ BackupSystemTable.FAM_COL.length) == 0
+ ) {
  fam = Bytes.toString(CellUtil.cloneValue(cell));
- } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
- BackupSystemTable.PATH_COL.length) == 0) {
+ } else if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
+ BackupSystemTable.PATH_COL.length) == 0
+ ) {
  path = Bytes.toString(CellUtil.cloneValue(cell));
- } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
- BackupSystemTable.STATE_COL.length) == 0) {
+ } else if (
+ CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
+ BackupSystemTable.STATE_COL.length) == 0
+ ) {
  byte[] state = CellUtil.cloneValue(cell);
  if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
  raw = true;

@@ -489,7 +503,7 @@ public final class BackupSystemTable implements Closeable {
  * @param backupId the backup Id
  */
  public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps,
  String backupId) throws IOException {
  try (Table table = connection.getTable(bulkLoadTableName)) {
  long ts = EnvironmentEdgeManager.currentTime();
  int cnt = 0;
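The bulk-load readers above repeat the same qualifier test for TBL_COL, FAM_COL and PATH_COL. A small helper capturing that CellUtil.compareQualifiers pattern (the helper itself is illustrative and not part of BackupSystemTable):

    // True when the cell's qualifier equals the given column qualifier.
    private static boolean hasQualifier(org.apache.hadoop.hbase.Cell cell, byte[] qualifier) {
      return org.apache.hadoop.hbase.CellUtil.compareQualifiers(cell, qualifier, 0, qualifier.length) == 0;
    }

    // Example use inside the cell loop:
    // if (hasQualifier(cell, BackupSystemTable.PATH_COL)) {
    //   path = Bytes.toString(CellUtil.cloneValue(cell));
    // }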
@@ -566,7 +580,7 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte.
  * @param startCode start code
  * @param backupRoot root directory path to backup
  * @throws IOException exception
  */

@@ -583,7 +597,7 @@ public final class BackupSystemTable implements Closeable {
  /**
  * Exclusive operations are: create, delete, merge
  * @throws IOException if a table operation fails or an active backup exclusive operation is
  * already underway
  */
  public void startBackupExclusiveOperation() throws IOException {
  LOG.debug("Start new backup exclusive operation");

@@ -591,11 +605,15 @@ public final class BackupSystemTable implements Closeable {
  try (Table table = connection.getTable(tableName)) {
  Put put = createPutForStartBackupSession();
  // First try to put if row does not exist
- if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
- .ifNotExists().thenPut(put)) {
+ if (
+ !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
+ .ifNotExists().thenPut(put)
+ ) {
  // Row exists, try to put if value == ACTIVE_SESSION_NO
- if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
- .ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
+ if (
+ !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
+ .ifEquals(ACTIVE_SESSION_NO).thenPut(put)
+ ) {
  throw new ExclusiveOperationException();
  }
  }

@@ -613,8 +631,10 @@ public final class BackupSystemTable implements Closeable {

  try (Table table = connection.getTable(tableName)) {
  Put put = createPutForStopBackupSession();
- if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
- .ifEquals(ACTIVE_SESSION_YES).thenPut(put)) {
+ if (
+ !table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
+ .ifEquals(ACTIVE_SESSION_YES).thenPut(put)
+ ) {
  throw new IOException("There is no active backup exclusive operation");
  }
  }

@@ -633,13 +653,13 @@ public final class BackupSystemTable implements Closeable {
  * @throws IOException exception
  */
  public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
  throws IOException {
  LOG.trace("read region server last roll log result to backup system table");

  Scan scan = createScanForReadRegionServerLastLogRollResult(backupRoot);

  try (Table table = connection.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan)) {
  Result res;
  HashMap<String, Long> rsTimestampMap = new HashMap<>();
  while ((res = scanner.next()) != null) {

@@ -656,13 +676,13 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Writes Region Server last roll log result (timestamp) to backup system table table
  * @param server Region Server name
  * @param ts last log timestamp
  * @param backupRoot root directory path to backup
  * @throws IOException exception
  */
  public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
  throws IOException {
  LOG.trace("write region server last roll log result to backup system table");

  try (Table table = connection.getTable(tableName)) {

@@ -710,7 +730,7 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Get backup history records filtered by list of filters.
  * @param n max number of records, if n == -1 , then max number is ignored
  * @param filters list of filters
  * @return backup records
  * @throws IOException if getting the backup history fails

@@ -793,7 +813,7 @@ public final class BackupSystemTable implements Closeable {
  }

  public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
  String backupRoot) throws IOException {
  List<BackupInfo> history = getBackupHistory(backupRoot);
  Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
  for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {

@@ -829,7 +849,7 @@ public final class BackupSystemTable implements Closeable {
  ArrayList<BackupInfo> list = new ArrayList<>();

  try (Table table = connection.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan)) {
  Result res;
  while ((res = scanner.next()) != null) {
  res.advance();
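The start/stop methods above implement the backup exclusive lock as conditional writes against a single row. A condensed sketch of the protocol using only the calls that appear in the diff (put and stopPut are the results of createPutForStartBackupSession() and createPutForStopBackupSession(); the ACTIVE_SESSION_* row, family, column and value constants are BackupSystemTable's):

    // Acquire: create the row if absent, otherwise take over only when the stored value is ACTIVE_SESSION_NO.
    if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
      .ifNotExists().thenPut(put)) {
      if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
        .ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
        throw new ExclusiveOperationException();
      }
    }

    // Release: only succeeds while the stored value is ACTIVE_SESSION_YES.
    if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
      .ifEquals(ACTIVE_SESSION_YES).thenPut(stopPut)) {
      throw new IOException("There is no active backup exclusive operation");
    }

Because both writes are checkAndMutate calls, two concurrent backup commands cannot both win the row, which is what makes create, delete and merge mutually exclusive.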
@@ -847,16 +867,16 @@ public final class BackupSystemTable implements Closeable {
  * Write the current timestamps for each regionserver to backup system table after a successful
  * full or incremental backup. The saved timestamp is of the last log file that was backed up
  * already.
  * @param tables tables
  * @param newTimestamps timestamps
  * @param backupRoot root directory path to backup
  * @throws IOException exception
  */
- public void writeRegionServerLogTimestamp(Set<TableName> tables,
- Map<String, Long> newTimestamps, String backupRoot) throws IOException {
+ public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Long> newTimestamps,
+ String backupRoot) throws IOException {
  if (LOG.isTraceEnabled()) {
  LOG.trace("write RS log time stamps to backup system table for tables ["
  + StringUtils.join(tables, ",") + "]");
  }
  List<Put> puts = new ArrayList<>();
  for (TableName table : tables) {

@@ -879,7 +899,7 @@ public final class BackupSystemTable implements Closeable {
  * @throws IOException exception
  */
  public Map<TableName, Map<String, Long>> readLogTimestampMap(String backupRoot)
  throws IOException {
  if (LOG.isTraceEnabled()) {
  LOG.trace("read RS log ts from backup system table for root=" + backupRoot);
  }

@@ -888,7 +908,7 @@ public final class BackupSystemTable implements Closeable {

  Scan scan = createScanForReadLogTimestampMap(backupRoot);
  try (Table table = connection.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan)) {
  Result res;
  while ((res = scanner.next()) != null) {
  res.advance();

@@ -899,11 +919,11 @@ public final class BackupSystemTable implements Closeable {
  byte[] data = CellUtil.cloneValue(cell);
  if (data == null) {
  throw new IOException("Data of last backup data from backup system table "
  + "is empty. Create a backup first.");
  }
  if (data != null && data.length > 0) {
  HashMap<String, Long> lastBackup =
  fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
  tableTimestampMap.put(tn, lastBackup);
  }
  }

@@ -912,11 +932,11 @@ public final class BackupSystemTable implements Closeable {
  }

  private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table,
  Map<String, Long> map) {
  BackupProtos.TableServerTimestamp.Builder tstBuilder =
  BackupProtos.TableServerTimestamp.newBuilder();
  tstBuilder
  .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));

  for (Entry<String, Long> entry : map.entrySet()) {
  BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();

@@ -939,7 +959,7 @@ public final class BackupSystemTable implements Closeable {
  List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
  for (BackupProtos.ServerTimestamp st : list) {
  ServerName sn =
  org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServerName());
  map.put(sn.getHostname() + ":" + sn.getPort(), st.getTimestamp());
  }
  return map;

@@ -973,12 +993,12 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Add tables to global incremental backup set
  * @param tables set of tables
  * @param backupRoot root directory path to backup
  * @throws IOException exception
  */
  public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
  throws IOException {
  if (LOG.isTraceEnabled()) {
  LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot
  + " tables [" + StringUtils.join(tables, " ") + "]");

@@ -1019,7 +1039,7 @@ public final class BackupSystemTable implements Closeable {
  Scan scan = createScanForBackupHistory();
  scan.setCaching(1);
  try (Table table = connection.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan)) {
  if (scanner.next() != null) {
  result = true;
  }

@@ -1073,13 +1093,13 @@ public final class BackupSystemTable implements Closeable {
  res.advance();
  String[] tables = cellValueToBackupSet(res.current());
  return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item))
  .collect(Collectors.toList());
  }
  }

  /**
  * Add backup set (list of tables)
  * @param name set name
  * @param newTables list of tables, comma-separated
  * @throws IOException if a table operation fails
  */

@@ -1105,7 +1125,7 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Remove tables from backup set (list of tables)
  * @param name set name
  * @param toRemove list of tables
  * @throws IOException if a table operation or deleting the backup set fails
  */

@@ -1132,7 +1152,7 @@ public final class BackupSystemTable implements Closeable {
  table.put(put);
  } else if (disjoint.length == tables.length) {
  LOG.warn("Backup set '" + name + "' does not contain tables ["
  + StringUtils.join(toRemove, " ") + "]");
  } else { // disjoint.length == 0 and tables.length >0
  // Delete backup set
  LOG.info("Backup set '" + name + "' is empty. Deleting.");

@@ -1176,7 +1196,7 @@ public final class BackupSystemTable implements Closeable {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf));

  ColumnFamilyDescriptorBuilder colBuilder =
  ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);

  colBuilder.setMaxVersions(1);
  Configuration config = HBaseConfiguration.create();

@@ -1213,10 +1233,10 @@ public final class BackupSystemTable implements Closeable {
  */
  public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
  TableDescriptorBuilder builder =
  TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));

  ColumnFamilyDescriptorBuilder colBuilder =
  ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
  colBuilder.setMaxVersions(1);
  Configuration config = HBaseConfiguration.create();
  int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,

@@ -1375,11 +1395,11 @@ public final class BackupSystemTable implements Closeable {
  /**
  * Creates Put to write RS last roll log timestamp map
  * @param table table
  * @param smap map, containing RS:ts
  * @return put operation
  */
  private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap,
  String backupRoot) {
  Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString()));
  put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap);
  return put;

@@ -1414,12 +1434,12 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Creates Put to store RS last log result
  * @param server server name
  * @param timestamp log roll result (timestamp)
  * @return put operation
  */
  private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp,
  String backupRoot) {
  Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server));
  put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"),
  Bytes.toBytes(timestamp));

@@ -1458,7 +1478,7 @@ public final class BackupSystemTable implements Closeable {
  * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
  */
  static List<Put> createPutForCommittedBulkload(TableName table, byte[] region,
  Map<byte[], List<Path>> finalPaths) {
  List<Put> puts = new ArrayList<>();
  for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) {
  for (Path path : entry.getValue()) {

@@ -1472,8 +1492,8 @@ public final class BackupSystemTable implements Closeable {
  put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
  put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
  puts.add(put);
- LOG.debug(
- "writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
+ LOG
+ .debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
  }
  }
  return puts;

@@ -1538,7 +1558,7 @@ public final class BackupSystemTable implements Closeable {
  * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
  */
  static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
  final List<Pair<Path, Path>> pairs) {
  List<Put> puts = new ArrayList<>(pairs.size());
  for (Pair<Path, Path> pair : pairs) {
  Path path = pair.getSecond();

@@ -1740,8 +1760,8 @@ public final class BackupSystemTable implements Closeable {
  */
  static Scan createScanForBulkLoadedFiles(String backupId) {
  Scan scan = new Scan();
- byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES
- : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
+ byte[] startRow =
+ backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);

@@ -1752,7 +1772,7 @@ public final class BackupSystemTable implements Closeable {
  }

  static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId,
  long ts, int idx) {
  Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx));
  put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName());
  put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam);

@@ -1798,7 +1818,7 @@ public final class BackupSystemTable implements Closeable {

  /**
  * Creates Put operation to update backup set content
  * @param name backup set's name
  * @param tables list of tables
  * @return put operation
  */
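createScanForBulkLoadedFiles above bounds a rowkey-prefix scan by copying the start row and incrementing its last byte to get an exclusive stop row. A standalone sketch of that construction (prefixBytes stands in for the real bulk-load prefix rowkey; withStartRow/withStopRow are the Scan API calls used in the diff):

    byte[] startRow = prefixBytes;
    byte[] stopRow = java.util.Arrays.copyOf(startRow, startRow.length);
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    Scan scan = new Scan();
    scan.withStartRow(startRow);
    scan.withStopRow(stopRow); // every key that starts with the prefix sorts before stopRow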
Changes to an additional file in the same package (its name is not shown in this excerpt):

@@ -1,5 +1,4 @@
- /**
- *
+ /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information

@@ -19,7 +18,6 @@
  package org.apache.hadoop.hbase.backup.impl;

  import java.io.IOException;

  import org.apache.yetus.audience.InterfaceAudience;

  @InterfaceAudience.Private
Some files were not shown because too many files have changed in this diff.