HADOOP-4687 Moving directories around
git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/core@776176 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent c94ff0f240
commit abe7be9134
BIN
lib/commons-cli-2.0-SNAPSHOT.jar
Normal file
Binary file not shown.
66
lib/hsqldb-1.8.0.10.LICENSE.txt
Normal file
@@ -0,0 +1,66 @@
/* Copyright (c) 1995-2000, The Hypersonic SQL Group.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the Hypersonic SQL Group nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE HYPERSONIC SQL GROUP,
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This software consists of voluntary contributions made by many individuals
 * on behalf of the Hypersonic SQL Group.
 *
 *
 * For work added by the HSQL Development Group:
 *
 * Copyright (c) 2001-2004, The HSQL Development Group
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the HSQL Development Group nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
BIN
lib/hsqldb-1.8.0.10.jar
Normal file
Binary file not shown.
43272
lib/jdiff/hadoop_0.17.0.xml
Normal file
File diff suppressed because it is too large
44778
lib/jdiff/hadoop_0.18.1.xml
Normal file
File diff suppressed because it is too large
38788
lib/jdiff/hadoop_0.18.2.xml
Normal file
File diff suppressed because it is too large
38826
lib/jdiff/hadoop_0.18.3.xml
Normal file
File diff suppressed because it is too large
43972
lib/jdiff/hadoop_0.19.0.xml
Normal file
File diff suppressed because it is too large
44195
lib/jdiff/hadoop_0.19.1.xml
Normal file
File diff suppressed because it is too large
52140
lib/jdiff/hadoop_0.20.0.xml
Normal file
File diff suppressed because it is too large
BIN
lib/kfs-0.2.2.jar
Normal file
Binary file not shown.
202
lib/kfs-0.2.LICENSE.txt
Normal file
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
450
src/test/org/apache/hadoop/cli/TestCLI.java
Normal file
@@ -0,0 +1,450 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli;

import java.io.File;
import java.util.ArrayList;

import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.cli.util.CLITestData;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.cli.util.ComparatorBase;
import org.apache.hadoop.cli.util.ComparatorData;
import org.apache.hadoop.cli.util.CLITestData.TestCmd;
import org.apache.hadoop.cli.util.CLITestData.TestCmd.CommandType;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.util.StringUtils;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

/**
 * Tests for the Command Line Interface (CLI)
 */
public class TestCLI extends TestCase {
  private static final Log LOG =
    LogFactory.getLog(TestCLI.class.getName());

  // In this mode, it runs the command and compares the actual output
  // with the expected output
  public static final String TESTMODE_TEST = "test"; // Run the tests

  // If it is set to nocompare, run the command and do not compare.
  // This can be useful to populate the testConfig.xml file the first time
  // a new command is added
  public static final String TESTMODE_NOCOMPARE = "nocompare";
  public static final String TEST_CACHE_DATA_DIR =
    System.getProperty("test.cache.data", "build/test/cache");

  // By default, run the tests. The other mode is to run the commands and not
  // compare the output
  protected String testMode = TESTMODE_TEST;

  // Storage for tests read in from the config file
  protected ArrayList<CLITestData> testsFromConfigFile = null;
  protected ArrayList<ComparatorData> testComparators = null;
  protected String thisTestCaseName = null;
  protected ComparatorData comparatorData = null;
  protected Configuration conf = null;
  protected String clitestDataDir = null;
  protected String username = null;

  /**
   * Read the test config file - testConfig.xml
   */
  protected void readTestConfigFile() {
    String testConfigFile = getTestFile();
    if (testsFromConfigFile == null) {
      boolean success = false;
      testConfigFile = TEST_CACHE_DATA_DIR + File.separator + testConfigFile;
      try {
        SAXParser p = (SAXParserFactory.newInstance()).newSAXParser();
        p.parse(testConfigFile, new TestConfigFileParser());
        success = true;
      } catch (Exception e) {
        LOG.info("File: " + testConfigFile + " not found");
        success = false;
      }
      assertTrue("Error reading test config file", success);
    }
  }

  protected String getTestFile() {
    return "testConf.xml";
  }

  /*
   * Setup
   */
  public void setUp() throws Exception {
    // Read the testConfig.xml file
    readTestConfigFile();

    conf = new Configuration();
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG,
                    true);

    clitestDataDir = new File(TEST_CACHE_DATA_DIR).
      toURI().toString().replace(' ', '+');
  }

  /**
   * Tear down
   */
  public void tearDown() throws Exception {
    displayResults();
  }

  /**
   * Expand the commands from the test config xml file
   * @param cmd the command as read from the config file
   * @return String expanded command
   */
  protected String expandCommand(final String cmd) {
    String expCmd = cmd;
    expCmd = expCmd.replaceAll("CLITEST_DATA", clitestDataDir);
    expCmd = expCmd.replaceAll("USERNAME", username);

    return expCmd;
  }

  /**
   * Display the summarized results
   */
  private void displayResults() {
    LOG.info("Detailed results:");
    LOG.info("----------------------------------\n");

    for (int i = 0; i < testsFromConfigFile.size(); i++) {
      CLITestData td = testsFromConfigFile.get(i);

      boolean testResult = td.getTestResult();

      // Display the details only if there is a failure
      if (!testResult) {
        LOG.info("-------------------------------------------");
        LOG.info(" Test ID: [" + (i + 1) + "]");
        LOG.info(" Test Description: [" + td.getTestDesc() + "]");
        LOG.info("");

        ArrayList<TestCmd> testCommands = td.getTestCommands();
        for (TestCmd cmd : testCommands) {
          LOG.info(" Test Commands: [" +
                   expandCommand(cmd.getCmd()) + "]");
        }

        LOG.info("");
        ArrayList<TestCmd> cleanupCommands = td.getCleanupCommands();
        for (TestCmd cmd : cleanupCommands) {
          LOG.info(" Cleanup Commands: [" +
                   expandCommand(cmd.getCmd()) + "]");
        }

        LOG.info("");
        ArrayList<ComparatorData> compdata = td.getComparatorData();
        for (ComparatorData cd : compdata) {
          boolean resultBoolean = cd.getTestResult();
          LOG.info(" Comparator: [" +
                   cd.getComparatorType() + "]");
          LOG.info(" Comparison result: [" +
                   (resultBoolean ? "pass" : "fail") + "]");
          LOG.info(" Expected output: [" +
                   cd.getExpectedOutput() + "]");
          LOG.info(" Actual output: [" +
                   cd.getActualOutput() + "]");
        }
        LOG.info("");
      }
    }

    LOG.info("Summary results:");
    LOG.info("----------------------------------\n");

    boolean overallResults = true;
    int totalPass = 0;
    int totalFail = 0;
    int totalComparators = 0;
    for (int i = 0; i < testsFromConfigFile.size(); i++) {
      CLITestData td = testsFromConfigFile.get(i);
      totalComparators +=
        testsFromConfigFile.get(i).getComparatorData().size();
      boolean resultBoolean = td.getTestResult();
      if (resultBoolean) {
        totalPass++;
      } else {
        totalFail++;
      }
      overallResults &= resultBoolean;
    }

    LOG.info(" Testing mode: " + testMode);
    LOG.info("");
    LOG.info(" Overall result: " +
             (overallResults ? "+++ PASS +++" : "--- FAIL ---"));
    if ((totalPass + totalFail) == 0) {
      LOG.info(" # Tests pass: " + 0);
      LOG.info(" # Tests fail: " + 0);
    } else {
      LOG.info(" # Tests pass: " + totalPass +
               " (" + (100 * totalPass / (totalPass + totalFail)) + "%)");
      LOG.info(" # Tests fail: " + totalFail +
               " (" + (100 * totalFail / (totalPass + totalFail)) + "%)");
    }

    LOG.info(" # Validations done: " + totalComparators +
             " (each test may do multiple validations)");

    LOG.info("");
    LOG.info("Failing tests:");
    LOG.info("--------------");
    int i = 0;
    boolean foundTests = false;
    for (i = 0; i < testsFromConfigFile.size(); i++) {
      boolean resultBoolean = testsFromConfigFile.get(i).getTestResult();
      if (!resultBoolean) {
        LOG.info((i + 1) + ": " +
                 testsFromConfigFile.get(i).getTestDesc());
        foundTests = true;
      }
    }
    if (!foundTests) {
      LOG.info("NONE");
    }

    foundTests = false;
    LOG.info("");
    LOG.info("Passing tests:");
    LOG.info("--------------");
    for (i = 0; i < testsFromConfigFile.size(); i++) {
      boolean resultBoolean = testsFromConfigFile.get(i).getTestResult();
      if (resultBoolean) {
        LOG.info((i + 1) + ": " +
                 testsFromConfigFile.get(i).getTestDesc());
        foundTests = true;
      }
    }
    if (!foundTests) {
      LOG.info("NONE");
    }

    assertTrue("One of the tests failed. " +
               "See the Detailed results to identify " +
               "the command that failed", overallResults);
  }

  /**
   * Compare the actual output with the expected output
   * @param compdata the comparator data for this validation
   * @return true if the actual output matches the expected output
   */
  private boolean compareTestOutput(ComparatorData compdata, Result cmdResult) {
    // Compare the output based on the comparator
    String comparatorType = compdata.getComparatorType();
    Class<?> comparatorClass = null;

    // If testMode is "test", then run the command and compare the output.
    // If testMode is "nocompare", then run the command and dump the output;
    // do not compare.

    boolean compareOutput = false;

    if (testMode.equals(TESTMODE_TEST)) {
      try {
        // Initialize the comparator class and run its compare method
        comparatorClass = Class.forName("org.apache.hadoop.cli.util." +
                                        comparatorType);
        ComparatorBase comp = (ComparatorBase) comparatorClass.newInstance();
        compareOutput = comp.compare(cmdResult.getCommandOutput(),
                                     compdata.getExpectedOutput());
      } catch (Exception e) {
        LOG.info("Error in instantiating the comparator: " + e);
      }
    }

    return compareOutput;
  }

  /***********************************
   ************* TESTS
   *********************************/

  public void testAll() {
    LOG.info("TestAll");

    // Run the tests defined in the testConf.xml config file.
    for (int index = 0; index < testsFromConfigFile.size(); index++) {

      CLITestData testdata = (CLITestData) testsFromConfigFile.get(index);

      // Execute the test commands
      ArrayList<TestCmd> testCommands = testdata.getTestCommands();
      Result cmdResult = null;
      for (TestCmd cmd : testCommands) {
        try {
          cmdResult = execute(cmd);
        } catch (Exception e) {
          fail(StringUtils.stringifyException(e));
        }
      }

      boolean overallTCResult = true;
      // Run comparators
      ArrayList<ComparatorData> compdata = testdata.getComparatorData();
      for (ComparatorData cd : compdata) {
        final String comptype = cd.getComparatorType();

        boolean compareOutput = false;

        if (!comptype.equalsIgnoreCase("none")) {
          compareOutput = compareTestOutput(cd, cmdResult);
          overallTCResult &= compareOutput;
        }

        cd.setExitCode(cmdResult.getExitCode());
        cd.setActualOutput(cmdResult.getCommandOutput());
        cd.setTestResult(compareOutput);
      }
      testdata.setTestResult(overallTCResult);

      // Execute the cleanup commands
      ArrayList<TestCmd> cleanupCommands = testdata.getCleanupCommands();
      for (TestCmd cmd : cleanupCommands) {
        try {
          execute(cmd);
        } catch (Exception e) {
          fail(StringUtils.stringifyException(e));
        }
      }
    }
  }

  protected CommandExecutor.Result execute(TestCmd cmd) throws Exception {
    throw new Exception("Unknown type of test command: " + cmd.getType());
  }

  /*
   * Parser class for the test config xml file
   */
  class TestConfigFileParser extends DefaultHandler {
    String charString = null;
    CLITestData td = null;
    ArrayList<TestCmd> testCommands = null;
    ArrayList<TestCmd> cleanupCommands = null;

    @Override
    public void startDocument() throws SAXException {
      testsFromConfigFile = new ArrayList<CLITestData>();
    }

    @Override
    public void startElement(String uri,
                             String localName,
                             String qName,
                             Attributes attributes) throws SAXException {
      if (qName.equals("test")) {
        td = new CLITestData();
      } else if (qName.equals("test-commands")) {
        testCommands = new ArrayList<TestCmd>();
      } else if (qName.equals("cleanup-commands")) {
        cleanupCommands = new ArrayList<TestCmd>();
      } else if (qName.equals("comparators")) {
        testComparators = new ArrayList<ComparatorData>();
      } else if (qName.equals("comparator")) {
        comparatorData = new ComparatorData();
      }
      charString = "";
    }

    @Override
    public void endElement(String uri,
                           String localName,
                           String qName) throws SAXException {
      if (qName.equals("description")) {
        td.setTestDesc(charString);
      } else if (qName.equals("test-commands")) {
        td.setTestCommands(testCommands);
        testCommands = null;
      } else if (qName.equals("cleanup-commands")) {
        td.setCleanupCommands(cleanupCommands);
        cleanupCommands = null;
      } else if (qName.equals("command")) {
        if (testCommands != null) {
          testCommands.add(new TestCmd(charString, CommandType.FS));
        } else if (cleanupCommands != null) {
          cleanupCommands.add(new TestCmd(charString, CommandType.FS));
        }
      } else if (qName.equals("dfs-admin-command")) {
        if (testCommands != null) {
          testCommands.add(new TestCmd(charString, CommandType.DFSADMIN));
        } else if (cleanupCommands != null) {
          cleanupCommands.add(new TestCmd(charString, CommandType.DFSADMIN));
        }
      } else if (qName.equals("mr-admin-command")) {
        if (testCommands != null) {
          testCommands.add(new TestCmd(charString, CommandType.MRADMIN));
        } else if (cleanupCommands != null) {
          cleanupCommands.add(new TestCmd(charString, CommandType.MRADMIN));
        }
      } else if (qName.equals("archive-command")) {
        if (testCommands != null) {
          testCommands.add(new TestCmd(charString, CommandType.ARCHIVE));
        } else if (cleanupCommands != null) {
          cleanupCommands.add(new TestCmd(charString, CommandType.ARCHIVE));
        }
      } else if (qName.equals("comparators")) {
        td.setComparatorData(testComparators);
      } else if (qName.equals("comparator")) {
        testComparators.add(comparatorData);
      } else if (qName.equals("type")) {
        comparatorData.setComparatorType(charString);
      } else if (qName.equals("expected-output")) {
        comparatorData.setExpectedOutput(charString);
      } else if (qName.equals("test")) {
        testsFromConfigFile.add(td);
        td = null;
      } else if (qName.equals("mode")) {
        testMode = charString;
        if (!testMode.equals(TESTMODE_NOCOMPARE) &&
            !testMode.equals(TESTMODE_TEST)) {
          testMode = TESTMODE_TEST;
        }
      }
    }

    @Override
    public void characters(char[] ch,
                           int start,
                           int length) throws SAXException {
      String s = new String(ch, start, length);
      charString += s;
    }
  }
}
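TestCLI.execute(TestCmd) deliberately throws, so a concrete suite subclasses TestCLI and routes each command to a CommandExecutor. A minimal sketch of such a subclass, assuming an echo-only executor; the class name and behavior are illustrative and not part of this commit:

```java
package org.apache.hadoop.cli;

import org.apache.hadoop.cli.util.CLITestData.TestCmd;
import org.apache.hadoop.cli.util.CommandExecutor;

// Hypothetical subclass; a real suite would invoke FsShell/DFSAdmin here.
public class TestExampleCLI extends TestCLI {

  // Minimal executor that just echoes the command string.
  private final CommandExecutor echoExecutor = new CommandExecutor() {
    @Override
    protected void execute(String cmd) throws Exception {
      System.out.println("would run: " + cmd);
    }
  };

  @Override
  protected CommandExecutor.Result execute(TestCmd cmd) throws Exception {
    // Dispatch point: this sketch treats every CommandType the same way.
    return echoExecutor.executeCommand(cmd.getCmd());
  }
}
```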
18
src/test/org/apache/hadoop/cli/testConf.xml
Normal file
@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>

<configuration>
  <!-- Normal mode is test. To run just the commands and dump the output
       to the log, set it to nocompare -->
  <mode>test</mode>

  <!-- Comparator types:
          ExactComparator
          SubstringComparator
          RegexpComparator
          TokenComparator
       -->
  <tests>

  </tests>
</configuration>
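The <tests> element ships empty in this commit. Pieced together from the element names TestConfigFileParser recognizes (description, test-commands, command, cleanup-commands, comparators, comparator, type, expected-output), a test entry would look roughly like the sketch below; the commands, the NAMENODE marker, and the expected output are illustrative only:

```xml
<!-- Illustrative entry, not part of this commit -->
<test>
  <description>ls: listing a file that exists</description>
  <test-commands>
    <command>-fs NAMENODE -touchz /file1</command>
    <command>-fs NAMENODE -ls /file1</command>
  </test-commands>
  <cleanup-commands>
    <command>-fs NAMENODE -rm /file1</command>
  </cleanup-commands>
  <comparators>
    <comparator>
      <type>SubstringComparator</type>
      <expected-output>/file1</expected-output>
    </comparator>
  </comparators>
</test>
```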
28
src/test/org/apache/hadoop/cli/testConf.xsl
Normal file
@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="ISO-8859-1"?>

<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">

<xsl:template match="/">
  <html>
  <body>
  <h2>Hadoop DFS command-line tests</h2>
  <table border="1">
    <tr bgcolor="#9acd32">
      <th align="left">ID</th>
      <th align="left">Command</th>
      <th align="left">Description</th>
    </tr>
    <xsl:for-each select="configuration/tests/test">
    <!-- <xsl:sort select="description"/> -->
    <tr>
      <td><xsl:value-of select="position()"/></td>
      <td><xsl:value-of select="substring-before(description,':')"/></td>
      <td><xsl:value-of select="substring-after(description,':')"/></td>
    </tr>
    </xsl:for-each>
  </table>
  </body>
  </html>
</xsl:template>

</xsl:stylesheet>
136
src/test/org/apache/hadoop/cli/util/CLITestData.java
Normal file
@@ -0,0 +1,136 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

import java.util.ArrayList;

/**
 * Class to store CLI Test Data
 */
public class CLITestData {
  private String testDesc = null;
  private ArrayList<TestCmd> testCommands = null;
  private ArrayList<TestCmd> cleanupCommands = null;
  private ArrayList<ComparatorData> comparatorData = null;
  private boolean testResult = false;

  public CLITestData() {

  }

  /**
   * Class to define a test command. Includes the type of the command and
   * the command itself. Valid types are FS, DFSADMIN, MRADMIN and ARCHIVE.
   */
  static public class TestCmd {
    public enum CommandType {
      FS,
      DFSADMIN,
      MRADMIN,
      ARCHIVE
    }
    private final CommandType type;
    private final String cmd;

    public TestCmd(String str, CommandType type) {
      cmd = str;
      this.type = type;
    }

    public CommandType getType() {
      return type;
    }

    public String getCmd() {
      return cmd;
    }

    public String toString() {
      return cmd;
    }
  }

  /**
   * @return the testDesc
   */
  public String getTestDesc() {
    return testDesc;
  }

  /**
   * @param testDesc the testDesc to set
   */
  public void setTestDesc(String testDesc) {
    this.testDesc = testDesc;
  }

  /**
   * @return the testCommands
   */
  public ArrayList<TestCmd> getTestCommands() {
    return testCommands;
  }

  /**
   * @param testCommands the testCommands to set
   */
  public void setTestCommands(ArrayList<TestCmd> testCommands) {
    this.testCommands = testCommands;
  }

  /**
   * @return the comparatorData
   */
  public ArrayList<ComparatorData> getComparatorData() {
    return comparatorData;
  }

  /**
   * @param comparatorData the comparatorData to set
   */
  public void setComparatorData(ArrayList<ComparatorData> comparatorData) {
    this.comparatorData = comparatorData;
  }

  /**
   * @return the testResult
   */
  public boolean getTestResult() {
    return testResult;
  }

  /**
   * @param testResult the testResult to set
   */
  public void setTestResult(boolean testResult) {
    this.testResult = testResult;
  }

  /**
   * @return the cleanupCommands
   */
  public ArrayList<TestCmd> getCleanupCommands() {
    return cleanupCommands;
  }

  /**
   * @param cleanupCommands the cleanupCommands to set
   */
  public void setCleanupCommands(ArrayList<TestCmd> cleanupCommands) {
    this.cleanupCommands = cleanupCommands;
  }
}
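For orientation, this is how one CLITestData instance is assembled by hand; it mirrors what TestConfigFileParser builds while reading testConf.xml. The command string and comparator values are made up:

```java
import java.util.ArrayList;

import org.apache.hadoop.cli.util.CLITestData;
import org.apache.hadoop.cli.util.CLITestData.TestCmd;
import org.apache.hadoop.cli.util.CLITestData.TestCmd.CommandType;
import org.apache.hadoop.cli.util.ComparatorData;

public class CLITestDataDemo {
  public static void main(String[] args) {
    CLITestData td = new CLITestData();
    td.setTestDesc("ls: listing a file that exists");

    // One FS command, as a <command> element would produce
    ArrayList<TestCmd> cmds = new ArrayList<TestCmd>();
    cmds.add(new TestCmd("-fs NAMENODE -ls /file1", CommandType.FS));
    td.setTestCommands(cmds);

    // One comparator, as a <comparator> element would produce
    ComparatorData cd = new ComparatorData();
    cd.setComparatorType("SubstringComparator");
    cd.setExpectedOutput("/file1");
    ArrayList<ComparatorData> comps = new ArrayList<ComparatorData>();
    comps.add(cd);
    td.setComparatorData(comps);

    System.out.println(td.getTestDesc() + " -> " + td.getTestCommands());
  }
}
```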
111
src/test/org/apache/hadoop/cli/util/CommandExecutor.java
Normal file
@@ -0,0 +1,111 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.util.StringTokenizer;

import org.apache.hadoop.cli.TestCLI;

/**
 * This class executes commands and captures their output
 */
public abstract class CommandExecutor {
  protected String[] getCommandAsArgs(final String cmd, final String masterKey,
                                      final String master) {
    StringTokenizer tokenizer = new StringTokenizer(cmd, " ");
    String[] args = new String[tokenizer.countTokens()];

    int i = 0;
    while (tokenizer.hasMoreTokens()) {
      args[i] = tokenizer.nextToken();

      // Expand the substitution markers in each argument
      args[i] = args[i].replaceAll(masterKey, master);
      args[i] = args[i].replaceAll("CLITEST_DATA",
                                   new File(TestCLI.TEST_CACHE_DATA_DIR).
                                   toURI().toString().replace(' ', '+'));
      args[i] = args[i].replaceAll("USERNAME", System.getProperty("user.name"));

      i++;
    }

    return args;
  }

  public Result executeCommand(final String cmd) throws Exception {
    int exitCode = 0;
    Exception lastException = null;

    ByteArrayOutputStream bao = new ByteArrayOutputStream();
    PrintStream origOut = System.out;
    PrintStream origErr = System.err;

    // Redirect stdout and stderr so the command's output can be captured
    System.setOut(new PrintStream(bao));
    System.setErr(new PrintStream(bao));

    try {
      execute(cmd);
    } catch (Exception e) {
      e.printStackTrace();
      lastException = e;
      exitCode = -1;
    } finally {
      System.setOut(origOut);
      System.setErr(origErr);
    }
    return new Result(bao.toString(), exitCode, lastException, cmd);
  }

  protected abstract void execute(final String cmd) throws Exception;

  public static class Result {
    final String commandOutput;
    final int exitCode;
    final Exception exception;
    final String cmdExecuted;

    public Result(String commandOutput, int exitCode, Exception exception,
                  String cmdExecuted) {
      this.commandOutput = commandOutput;
      this.exitCode = exitCode;
      this.exception = exception;
      this.cmdExecuted = cmdExecuted;
    }

    public String getCommandOutput() {
      return commandOutput;
    }

    public int getExitCode() {
      return exitCode;
    }

    public Exception getException() {
      return exception;
    }

    public String getCommand() {
      return cmdExecuted;
    }
  }

}
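A minimal concrete executor, assuming a NAMENODE substitution marker and a made-up URI: it resolves markers via getCommandAsArgs() and prints the resulting argv, which executeCommand() captures through the stream redirection above. A real suite would hand the argv to FsShell or a similar entry point instead:

```java
package org.apache.hadoop.cli.util;

// Hypothetical executor, not part of this commit.
public class EchoCommandExecutor extends CommandExecutor {
  @Override
  protected void execute(final String cmd) throws Exception {
    // "NAMENODE" -> URI is an assumed substitution pair
    String[] args = getCommandAsArgs(cmd, "NAMENODE", "hdfs://localhost:9000");
    for (String arg : args) {
      System.out.println(arg); // captured by executeCommand()
    }
  }
}
```

Calling new EchoCommandExecutor().executeCommand("-fs NAMENODE -ls /") then yields a Result whose getCommandOutput() contains the printed tokens and whose getExitCode() is 0.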
39
src/test/org/apache/hadoop/cli/util/ComparatorBase.java
Normal file
@@ -0,0 +1,39 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

/**
 * Comparator interface. To define a new comparator, implement the compare
 * method
 */
public abstract class ComparatorBase {
  public ComparatorBase() {

  }

  /**
   * Compare method for the comparator class.
   * @param actual actual output; can be null
   * @param expected expected output; can be null
   * @return true if the expected output compares with the actual output, else
   *         false. If actual or expected is null, return false
   */
  public abstract boolean compare(String actual, String expected);
}
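Because TestCLI loads comparators with Class.forName("org.apache.hadoop.cli.util." + type), a new comparator only needs to extend ComparatorBase and live in this package to become usable as a <type> value in testConf.xml. A hypothetical example, not part of this commit, that compares after trimming surrounding whitespace:

```java
package org.apache.hadoop.cli.util;

// Hypothetical comparator: usable as <type>TrimmedExactComparator</type>.
public class TrimmedExactComparator extends ComparatorBase {
  @Override
  public boolean compare(String actual, String expected) {
    if (actual == null || expected == null) {
      return false; // per the contract above, null never matches
    }
    return actual.trim().equals(expected.trim());
  }
}
```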
106
src/test/org/apache/hadoop/cli/util/ComparatorData.java
Normal file
@@ -0,0 +1,106 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

/**
 * Class to store CLI Test Comparators Data
 */
public class ComparatorData {
  private String expectedOutput = null;
  private String actualOutput = null;
  private boolean testResult = false;
  private int exitCode = 0;
  private String comparatorType = null;

  public ComparatorData() {

  }

  /**
   * @return the expectedOutput
   */
  public String getExpectedOutput() {
    return expectedOutput;
  }

  /**
   * @param expectedOutput the expectedOutput to set
   */
  public void setExpectedOutput(String expectedOutput) {
    this.expectedOutput = expectedOutput;
  }

  /**
   * @return the actualOutput
   */
  public String getActualOutput() {
    return actualOutput;
  }

  /**
   * @param actualOutput the actualOutput to set
   */
  public void setActualOutput(String actualOutput) {
    this.actualOutput = actualOutput;
  }

  /**
   * @return the testResult
   */
  public boolean getTestResult() {
    return testResult;
  }

  /**
   * @param testResult the testResult to set
   */
  public void setTestResult(boolean testResult) {
    this.testResult = testResult;
  }

  /**
   * @return the exitCode
   */
  public int getExitCode() {
    return exitCode;
  }

  /**
   * @param exitCode the exitCode to set
   */
  public void setExitCode(int exitCode) {
    this.exitCode = exitCode;
  }

  /**
   * @return the comparatorType
   */
  public String getComparatorType() {
    return comparatorType;
  }

  /**
   * @param comparatorType the comparatorType to set
   */
  public void setComparatorType(String comparatorType) {
    this.comparatorType = comparatorType;
  }

}
34
src/test/org/apache/hadoop/cli/util/ExactComparator.java
Normal file
@@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

/**
 * Comparator for the Command line tests.
 *
 * This comparator compares the actual to the expected and
 * returns true only if they are the same.
 */
public class ExactComparator extends ComparatorBase {

  @Override
  public boolean compare(String actual, String expected) {
    return actual.equals(expected);
  }
}
39
src/test/org/apache/hadoop/cli/util/RegexpAcrossOutputComparator.java
Normal file
@@ -0,0 +1,39 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

import java.util.regex.Pattern;

/**
 * Comparator for command line tests that attempts to find a regexp
 * within the entire text returned by a command.
 *
 * This comparator differs from RegexpComparator in that it attempts
 * to match the pattern within all of the text returned by the command,
 * rather than matching against each line of the returned text. This
 * allows matching against patterns that span multiple lines.
 */
public class RegexpAcrossOutputComparator extends ComparatorBase {

  @Override
  public boolean compare(String actual, String expected) {
    return Pattern.compile(expected).matcher(actual).find();
  }

}
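A small standalone illustration of the difference, with made-up output text: a pattern that crosses a line break is found when matching the whole output, but never when matching line by line as RegexpComparator does:

```java
import java.util.regex.Pattern;

public class RegexpAcrossDemo {
  public static void main(String[] args) {
    String output = "Deleted /user/foo/file1\nDeleted /user/foo/file2\n";
    String spanning = "file1\\s+Deleted"; // crosses the first newline

    // Whole-output search, as RegexpAcrossOutputComparator does: true
    System.out.println(Pattern.compile(spanning).matcher(output).find());

    // Per-line full matching, as RegexpComparator does: false
    boolean perLine = false;
    for (String line : output.split("[\n\r]+")) {
      perLine |= Pattern.compile(spanning).matcher(line).matches();
    }
    System.out.println(perLine);
  }
}
```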
50
src/test/org/apache/hadoop/cli/util/RegexpComparator.java
Normal file
@@ -0,0 +1,50 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Comparator for the Command line tests.
 *
 * This comparator searches for the regular expression specified in 'expected'
 * in the string 'actual' and returns true if one of the lines of the actual
 * output matches the regular expression in full.
 */
public class RegexpComparator extends ComparatorBase {

  @Override
  public boolean compare(String actual, String expected) {
    boolean success = false;
    Pattern p = Pattern.compile(expected);

    // The pattern must match an entire line of the actual output
    StringTokenizer tokenizer = new StringTokenizer(actual, "\n\r");
    while (tokenizer.hasMoreTokens() && !success) {
      String actualToken = tokenizer.nextToken();
      Matcher m = p.matcher(actualToken);
      success = m.matches();
    }

    return success;
  }

}
33
src/test/org/apache/hadoop/cli/util/SubstringComparator.java
Normal file
@@ -0,0 +1,33 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

public class SubstringComparator extends ComparatorBase {

  @Override
  public boolean compare(String actual, String expected) {
    // Match if the expected string occurs anywhere in the actual output
    int compareOutput = actual.indexOf(expected);
    if (compareOutput == -1) {
      return false;
    }

    return true;
  }

}
49
src/test/org/apache/hadoop/cli/util/TokenComparator.java
Normal file
@@ -0,0 +1,49 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli.util;

import java.util.StringTokenizer;

/**
 * Comparator for the Command line tests.
 *
 * This comparator compares each token in the expected output and returns true
 * if all tokens are in the actual output.
 */
public class TokenComparator extends ComparatorBase {

  @Override
  public boolean compare(String actual, String expected) {
    boolean compareOutput = true;

    StringTokenizer tokenizer = new StringTokenizer(expected, ",\n\r");

    while (tokenizer.hasMoreTokens()) {
      String token = tokenizer.nextToken();
      // A single missing token makes the overall comparison fail
      compareOutput &= (actual.indexOf(token) != -1);
    }

    return compareOutput;
  }
}
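Illustrative usage with made-up values: the expected string is split on commas and newlines, and every resulting token must appear somewhere in the actual output:

```java
import org.apache.hadoop.cli.util.TokenComparator;

public class TokenComparatorDemo {
  public static void main(String[] args) {
    TokenComparator tc = new TokenComparator();
    String output = "Found 2 items\n/user/foo/file1\n/user/foo/file2\n";
    System.out.println(tc.compare(output, "file1,file2"));   // true
    System.out.println(tc.compare(output, "file1,missing")); // false
  }
}
```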
392
src/test/org/apache/hadoop/conf/TestConfiguration.java
Normal file
@@ -0,0 +1,392 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.conf;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Random;

import junit.framework.TestCase;

import org.apache.hadoop.fs.Path;

public class TestConfiguration extends TestCase {

  private Configuration conf;
  private BufferedWriter out;
  final static String CONFIG = new File("./test-config.xml").getAbsolutePath();
  final static String CONFIG2 = new File("./test-config2.xml").getAbsolutePath();
  final static Random RAN = new Random();

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    conf = new Configuration();
  }

  @Override
  protected void tearDown() throws Exception {
    super.tearDown();
    new File(CONFIG).delete();
    new File(CONFIG2).delete();
  }

  private void startConfig() throws IOException {
    out.write("<?xml version=\"1.0\"?>\n");
    out.write("<configuration>\n");
  }

  private void endConfig() throws IOException {
    out.write("</configuration>\n");
    out.close();
  }

  private void addInclude(String filename) throws IOException {
    out.write("<xi:include href=\"" + filename
        + "\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />\n ");
  }

  public void testVariableSubstitution() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    declareProperty("my.int", "${intvar}", "42");
    declareProperty("intvar", "42", "42");
    declareProperty("my.base", "/tmp/${user.name}", UNSPEC);
    declareProperty("my.file", "hello", "hello");
    declareProperty("my.suffix", ".txt", ".txt");
    declareProperty("my.relfile", "${my.file}${my.suffix}", "hello.txt");
    declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", UNSPEC);
    // check that undefined variables are returned as-is
    declareProperty("my.failsexpand", "a${my.undefvar}b", "a${my.undefvar}b");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);

    for (Prop p : props) {
      System.out.println("p=" + p.name);
      String gotVal = conf.get(p.name);
      String gotRawVal = conf.getRaw(p.name);
      assertEq(p.val, gotRawVal);
      if (p.expectEval == UNSPEC) {
        // expansion is system-dependent (uses System properties), so we
        // can't do an exact match; just check that all variables got expanded
        assertTrue(gotVal != null && -1 == gotVal.indexOf("${"));
      } else {
        assertEq(p.expectEval, gotVal);
      }
    }

    // check that expansion also occurs for getInt()
    assertTrue(conf.getInt("intvar", -1) == 42);
    assertTrue(conf.getInt("my.int", -1) == 42);
  }

  public static void assertEq(Object a, Object b) {
    System.out.println("assertEq: " + a + ", " + b);
    assertEquals(a, b);
  }

  static class Prop {
    String name;
    String val;
    String expectEval;
  }

  final String UNSPEC = null;
  ArrayList<Prop> props = new ArrayList<Prop>();

  void declareProperty(String name, String val, String expectEval)
      throws IOException {
    declareProperty(name, val, expectEval, false);
  }

  void declareProperty(String name, String val, String expectEval,
      boolean isFinal) throws IOException {
    appendProperty(name, val, isFinal);
    Prop p = new Prop();
    p.name = name;
    p.val = val;
    p.expectEval = expectEval;
    props.add(p);
  }

  void appendProperty(String name, String val) throws IOException {
    appendProperty(name, val, false);
  }

  void appendProperty(String name, String val, boolean isFinal)
      throws IOException {
    out.write("<property>");
    out.write("<name>");
    out.write(name);
    out.write("</name>");
    out.write("<value>");
    out.write(val);
    out.write("</value>");
    if (isFinal) {
      out.write("<final>true</final>");
    }
    out.write("</property>\n");
  }

  public void testOverlay() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("a", "b");
    appendProperty("b", "c");
    appendProperty("d", "e");
    appendProperty("e", "f", true);
    endConfig();

    out = new BufferedWriter(new FileWriter(CONFIG2));
    startConfig();
    appendProperty("a", "b");
    appendProperty("b", "d");
    appendProperty("e", "e");
    endConfig();

    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);

    // set a couple of properties dynamically
    conf.set("c", "d");
    conf.set("a", "d");

    Configuration clone = new Configuration(conf);
    clone.addResource(new Path(CONFIG2));

    assertEquals(clone.get("a"), "d");
    assertEquals(clone.get("b"), "d");
    assertEquals(clone.get("c"), "d");
    assertEquals(clone.get("d"), "e");
    assertEquals(clone.get("e"), "f");
  }

  public void testCommentsInValue() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("my.comment", "this <!--comment here--> contains a comment");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);
    // two spaces: one after "this", one before "contains"
    assertEquals("this  contains a comment", conf.get("my.comment"));
  }

  public void testTrim() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    String[] whitespaces = {"", " ", "\n", "\t"};
    String[] name = new String[100];
    for (int i = 0; i < name.length; i++) {
      name[i] = "foo" + i;
      StringBuilder prefix = new StringBuilder();
      StringBuilder postfix = new StringBuilder();
      for (int j = 0; j < 3; j++) {
        prefix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
        postfix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
      }

      appendProperty(prefix + name[i] + postfix, name[i] + ".value");
    }
    endConfig();

    conf.addResource(new Path(CONFIG));
    for (String n : name) {
      assertEquals(n + ".value", conf.get(n));
    }
  }

  public void testToString() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);

    String expectedOutput =
      "Configuration: core-default.xml, core-site.xml, " +
      fileResource.toString();
    assertEquals(expectedOutput, conf.toString());
  }

  public void testIncludes() throws Exception {
    tearDown();
    System.out.println("XXX testIncludes");
    out = new BufferedWriter(new FileWriter(CONFIG2));
    startConfig();
    appendProperty("a", "b");
    appendProperty("c", "d");
    endConfig();

    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    addInclude(CONFIG2);
    appendProperty("e", "f");
    appendProperty("g", "h");
    endConfig();

    // verify that the including file sees the properties of the included file
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);
    assertEquals(conf.get("a"), "b");
    assertEquals(conf.get("c"), "d");
    assertEquals(conf.get("e"), "f");
    assertEquals(conf.get("g"), "h");
    tearDown();
  }

  public void testIntegerRanges() {
    Configuration conf = new Configuration();
    conf.set("first", "-100");
    conf.set("second", "4-6,9-10,27");
    conf.set("third", "34-");
    Configuration.IntegerRanges range = conf.getRange("first", null);
    System.out.println("first = " + range);
    assertEquals(true, range.isIncluded(0));
    assertEquals(true, range.isIncluded(1));
    assertEquals(true, range.isIncluded(100));
    assertEquals(false, range.isIncluded(101));
    range = conf.getRange("second", null);
    System.out.println("second = " + range);
    assertEquals(false, range.isIncluded(3));
    assertEquals(true, range.isIncluded(4));
    assertEquals(true, range.isIncluded(6));
    assertEquals(false, range.isIncluded(7));
    assertEquals(false, range.isIncluded(8));
    assertEquals(true, range.isIncluded(9));
    assertEquals(true, range.isIncluded(10));
    assertEquals(false, range.isIncluded(11));
    assertEquals(false, range.isIncluded(26));
    assertEquals(true, range.isIncluded(27));
    assertEquals(false, range.isIncluded(28));
    range = conf.getRange("third", null);
    System.out.println("third = " + range);
    assertEquals(false, range.isIncluded(33));
    assertEquals(true, range.isIncluded(34));
    assertEquals(true, range.isIncluded(100000000));
  }

  public void testHexValues() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("test.hex1", "0x10");
    appendProperty("test.hex2", "0xF");
    appendProperty("test.hex3", "-0x10");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);
    assertEquals(16, conf.getInt("test.hex1", 0));
    assertEquals(16, conf.getLong("test.hex1", 0));
    assertEquals(15, conf.getInt("test.hex2", 0));
    assertEquals(15, conf.getLong("test.hex2", 0));
    assertEquals(-16, conf.getInt("test.hex3", 0));
    assertEquals(-16, conf.getLong("test.hex3", 0));
  }

  public void testIntegerValues() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("test.int1", "20");
    appendProperty("test.int2", "020");
    appendProperty("test.int3", "-20");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);
    assertEquals(20, conf.getInt("test.int1", 0));
    assertEquals(20, conf.getLong("test.int1", 0));
    assertEquals(20, conf.getInt("test.int2", 0));
    assertEquals(20, conf.getLong("test.int2", 0));
    assertEquals(-20, conf.getInt("test.int3", 0));
    assertEquals(-20, conf.getLong("test.int3", 0));
  }

  public void testReload() throws IOException {
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("test.key1", "final-value1", true);
    appendProperty("test.key2", "value2");
    endConfig();
    Path fileResource = new Path(CONFIG);
    conf.addResource(fileResource);

    out = new BufferedWriter(new FileWriter(CONFIG2));
    startConfig();
    appendProperty("test.key1", "value1");
    appendProperty("test.key3", "value3");
    endConfig();
    Path fileResource1 = new Path(CONFIG2);
    conf.addResource(fileResource1);

    // add a few values via set.
    conf.set("test.key3", "value4");
    conf.set("test.key4", "value5");

    assertEquals("final-value1", conf.get("test.key1"));
    assertEquals("value2", conf.get("test.key2"));
    assertEquals("value4", conf.get("test.key3"));
    assertEquals("value5", conf.get("test.key4"));

    // change values in the test file...
    out = new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
    appendProperty("test.key1", "final-value1");
    appendProperty("test.key3", "final-value3", true);
    endConfig();

    conf.reloadConfiguration();
    assertEquals("value1", conf.get("test.key1"));
    // the property set via conf.set() overrides the reloaded value.
    assertEquals("value4", conf.get("test.key3"));
    assertEquals(null, conf.get("test.key2"));
    assertEquals("value5", conf.get("test.key4"));
  }

  public void testSize() throws IOException {
    Configuration conf = new Configuration(false);
    conf.set("a", "A");
    conf.set("b", "B");
    assertEquals(2, conf.size());
  }

  public void testClear() throws IOException {
    Configuration conf = new Configuration(false);
    conf.set("a", "A");
    conf.set("b", "B");
    conf.clear();
    assertEquals(0, conf.size());
    assertFalse(conf.iterator().hasNext());
  }

  public static void main(String[] argv) throws Exception {
    junit.textui.TestRunner.main(new String[]{
      TestConfiguration.class.getName()
    });
  }
}
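A brief sketch of the range syntax exercised by testIntegerRanges() above (the property name below is illustrative, not part of this commit): ranges are comma-separated, may be open-ended on either side, and isIncluded() tests membership.

import org.apache.hadoop.conf.Configuration;

public class IntegerRangesExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("example.range", "4-6,9-10,27");  // illustrative property name
    Configuration.IntegerRanges r = conf.getRange("example.range", null);
    System.out.println(r.isIncluded(5));   // true: inside 4-6
    System.out.println(r.isIncluded(8));   // false: between the ranges
    System.out.println(r.isIncluded(27));  // true: a single-value range
  }
}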
102
src/test/org/apache/hadoop/conf/TestConfigurationSubclass.java
Normal file
@@ -0,0 +1,102 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.conf;

import junit.framework.TestCase;

import java.util.Properties;

/**
 * Created 21-Jan-2009 13:42:36
 */
public class TestConfigurationSubclass extends TestCase {
  private static final String EMPTY_CONFIGURATION_XML
          = "/org/apache/hadoop/conf/empty-configuration.xml";

  public void testGetProps() {
    SubConf conf = new SubConf(true);
    Properties properties = conf.getProperties();
    assertNotNull("hadoop.tmp.dir is not set",
        properties.getProperty("hadoop.tmp.dir"));
  }

  public void testReload() throws Throwable {
    SubConf conf = new SubConf(true);
    assertFalse(conf.isReloaded());
    Configuration.addDefaultResource(EMPTY_CONFIGURATION_XML);
    assertTrue(conf.isReloaded());
    Properties properties = conf.getProperties();
  }

  public void testReloadNotQuiet() throws Throwable {
    SubConf conf = new SubConf(true);
    conf.setQuietMode(false);
    assertFalse(conf.isReloaded());
    conf.addResource("not-a-valid-resource");
    assertTrue(conf.isReloaded());
    try {
      Properties properties = conf.getProperties();
      fail("Should not have got here");
    } catch (RuntimeException e) {
      assertTrue(e.toString(), e.getMessage().contains("not found"));
    }
  }

  private static class SubConf extends Configuration {

    private boolean reloaded;

    /**
     * A new configuration where the behavior of reading from the default
     * resources can be turned off.
     *
     * If the parameter {@code loadDefaults} is false, the new instance will
     * not load resources from the default files.
     *
     * @param loadDefaults specifies whether to load from the default files
     */
    private SubConf(boolean loadDefaults) {
      super(loadDefaults);
    }

    public Properties getProperties() {
      return super.getProps();
    }

    /**
     * {@inheritDoc}.
     * Sets the reloaded flag.
     */
    @Override
    public void reloadConfiguration() {
      super.reloadConfiguration();
      reloaded = true;
    }

    public boolean isReloaded() {
      return reloaded;
    }

    public void setReloaded(boolean reloaded) {
      this.reloaded = reloaded;
    }
  }
}
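For context, a minimal sketch of the loadDefaults distinction that the SubConf constructor forwards, as described in its javadoc above (behavior stated only at the level the tests confirm):

import org.apache.hadoop.conf.Configuration;

public class LoadDefaultsExample {
  public static void main(String[] args) {
    Configuration withDefaults = new Configuration(true);   // loads core-default.xml, core-site.xml
    Configuration bare = new Configuration(false);          // starts with no resources at all
    System.out.println(withDefaults.get("hadoop.tmp.dir")); // non-null per testGetProps() above
    System.out.println(bare.get("hadoop.tmp.dir"));         // null
  }
}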
74
src/test/org/apache/hadoop/conf/TestGetInstances.java
Normal file
@@ -0,0 +1,74 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.conf;

import java.util.List;

import junit.framework.TestCase;

public class TestGetInstances extends TestCase {

  interface SampleInterface {}

  interface ChildInterface extends SampleInterface {}

  static class SampleClass implements SampleInterface {
    SampleClass() {}
  }

  static class AnotherClass implements ChildInterface {
    AnotherClass() {}
  }

  /**
   * Makes sure <code>Configuration.getInstances()</code> returns
   * instances of the required type.
   */
  public void testGetInstances() throws Exception {
    Configuration conf = new Configuration();

    List<SampleInterface> classes =
      conf.getInstances("no.such.property", SampleInterface.class);
    assertTrue(classes.isEmpty());

    conf.set("empty.property", "");
    classes = conf.getInstances("empty.property", SampleInterface.class);
    assertTrue(classes.isEmpty());

    conf.setStrings("some.classes",
        SampleClass.class.getName(), AnotherClass.class.getName());
    classes = conf.getInstances("some.classes", SampleInterface.class);
    assertEquals(2, classes.size());

    try {
      conf.setStrings("some.classes",
          SampleClass.class.getName(), AnotherClass.class.getName(),
          String.class.getName());
      conf.getInstances("some.classes", SampleInterface.class);
      fail("java.lang.String does not implement SampleInterface");
    } catch (RuntimeException e) {}

    try {
      conf.setStrings("some.classes",
          SampleClass.class.getName(), AnotherClass.class.getName(),
          "no.such.Class");
      conf.getInstances("some.classes", SampleInterface.class);
      fail("no.such.Class does not exist");
    } catch (RuntimeException e) {}
  }
}
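A usage sketch of getInstances() outside the test (the interface, class, and property names below are hypothetical):

import java.util.List;
import org.apache.hadoop.conf.Configuration;

public class GetInstancesExample {
  interface Plugin { }

  public static class LoggingPlugin implements Plugin {
    public LoggingPlugin() { }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // "example.plugins" is a hypothetical property name.
    conf.setStrings("example.plugins", LoggingPlugin.class.getName());
    List<Plugin> plugins = conf.getInstances("example.plugins", Plugin.class);
    System.out.println(plugins.size()); // 1
  }
}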
4
src/test/org/apache/hadoop/conf/empty-configuration.xml
Normal file
@@ -0,0 +1,4 @@
<?xml version="1.0"?>
<configuration>
</configuration>
77
src/test/org/apache/hadoop/filecache/TestDistributedCache.java
Normal file
@@ -0,0 +1,77 @@
package org.apache.hadoop.filecache;

import java.io.IOException;
import java.net.URI;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import junit.framework.TestCase;

public class TestDistributedCache extends TestCase {

  static final URI LOCAL_FS = URI.create("file:///");
  private static String TEST_CACHE_BASE_DIR =
    new Path(System.getProperty("test.build.data", "/tmp/cachebasedir"))
    .toString().replace(' ', '+');
  private static String TEST_ROOT_DIR =
    System.getProperty("test.build.data", "/tmp/distributedcache");
  private static final int TEST_FILE_SIZE = 4 * 1024;   // 4K
  private static final int LOCAL_CACHE_LIMIT = 5 * 1024; // 5K
  private Configuration conf;
  private Path firstCacheFile;
  private Path secondCacheFile;
  private FileSystem localfs;

  /**
   * @see TestCase#setUp()
   */
  @Override
  protected void setUp() throws IOException {
    conf = new Configuration();
    conf.setLong("local.cache.size", LOCAL_CACHE_LIMIT);
    localfs = FileSystem.get(LOCAL_FS, conf);
    firstCacheFile = new Path(TEST_ROOT_DIR + "/firstcachefile");
    secondCacheFile = new Path(TEST_ROOT_DIR + "/secondcachefile");
    createTempFile(localfs, firstCacheFile);
    createTempFile(localfs, secondCacheFile);
  }

  /** test delete cache */
  public void testDeleteCache() throws Exception {
    DistributedCache.getLocalCache(firstCacheFile.toUri(), conf,
        new Path(TEST_CACHE_BASE_DIR), false, System.currentTimeMillis(),
        new Path(TEST_ROOT_DIR));
    DistributedCache.releaseCache(firstCacheFile.toUri(), conf);
    // The calls above localized a 4K file and then released it, which makes
    // the cached copy eligible for deletion once the cache outgrows its
    // limit. The call below localizes a second file, which is intended to
    // push the cache over the 5K limit and sweep the first entry away.
    DistributedCache.getLocalCache(secondCacheFile.toUri(), conf,
        new Path(TEST_CACHE_BASE_DIR), false, System.currentTimeMillis(),
        new Path(TEST_ROOT_DIR));
    FileStatus[] dirStatuses = localfs.listStatus(new Path(TEST_CACHE_BASE_DIR));
    assertTrue("DistributedCache failed deleting old cache when the cache store is full.",
        dirStatuses.length > 1);
  }

  private void createTempFile(FileSystem fs, Path p) throws IOException {
    FSDataOutputStream out = fs.create(p);
    byte[] toWrite = new byte[TEST_FILE_SIZE];
    new Random().nextBytes(toWrite);
    out.write(toWrite);
    out.close();
    FileSystem.LOG.info("created: " + p + ", size=" + TEST_FILE_SIZE);
  }

  /**
   * @see TestCase#tearDown()
   */
  @Override
  protected void tearDown() throws IOException {
    localfs.delete(firstCacheFile, true);
    localfs.delete(secondCacheFile, true);
    localfs.close();
  }
}
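A sketch of the localize/release lifecycle the test exercises (the base and work directories are illustrative; the getLocalCache signature is the one used in the test above):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;

public class CacheLifecycleExample {
  // Localize a file, use it, then release it; paths are illustrative only.
  public static void useCachedFile(URI cacheUri, Configuration conf) throws Exception {
    Path baseDir = new Path("/tmp/cachebasedir"); // illustrative
    Path workDir = new Path("/tmp/work");         // illustrative
    DistributedCache.getLocalCache(cacheUri, conf, baseDir,
        false, System.currentTimeMillis(), workDir);
    // ... read the localized copy under baseDir ...
    DistributedCache.releaseCache(cacheUri, conf);
    // Once released, the entry may be evicted when "local.cache.size" is exceeded.
  }
}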
471
src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java
Normal file
@@ -0,0 +1,471 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.FileNotFoundException;
import java.io.IOException;

import junit.framework.TestCase;

/**
 * <p>
 * A collection of tests for the contract of the {@link FileSystem}.
 * This test should be used for general-purpose implementations of
 * {@link FileSystem}, that is, implementations that provide implementations
 * of all of the functionality of {@link FileSystem}.
 * </p>
 * <p>
 * To test a given {@link FileSystem} implementation create a subclass of this
 * test and override {@link #setUp()} to initialize the <code>fs</code>
 * {@link FileSystem} instance variable.
 * </p>
 */
public abstract class FileSystemContractBaseTest extends TestCase {

  protected FileSystem fs;
  private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
  {
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) (i % 10);
    }
  }

  @Override
  protected void tearDown() throws Exception {
    fs.delete(path("/test"), true);
  }

  protected int getBlockSize() {
    return 1024;
  }

  protected String getDefaultWorkingDirectory() {
    return "/user/" + System.getProperty("user.name");
  }

  protected boolean renameSupported() {
    return true;
  }

  public void testFsStatus() throws Exception {
    FsStatus fsStatus = fs.getStatus();
    assertNotNull(fsStatus);
    // used, free and capacity are non-negative longs
    assertTrue(fsStatus.getUsed() >= 0);
    assertTrue(fsStatus.getRemaining() >= 0);
    assertTrue(fsStatus.getCapacity() >= 0);
  }

  public void testWorkingDirectory() throws Exception {
    Path workDir = path(getDefaultWorkingDirectory());
    assertEquals(workDir, fs.getWorkingDirectory());

    fs.setWorkingDirectory(path("."));
    assertEquals(workDir, fs.getWorkingDirectory());

    fs.setWorkingDirectory(path(".."));
    assertEquals(workDir.getParent(), fs.getWorkingDirectory());

    Path relativeDir = path("hadoop");
    fs.setWorkingDirectory(relativeDir);
    assertEquals(relativeDir, fs.getWorkingDirectory());

    Path absoluteDir = path("/test/hadoop");
    fs.setWorkingDirectory(absoluteDir);
    assertEquals(absoluteDir, fs.getWorkingDirectory());
  }

  public void testMkdirs() throws Exception {
    Path testDir = path("/test/hadoop");
    assertFalse(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));

    assertTrue(fs.mkdirs(testDir));

    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));

    assertTrue(fs.mkdirs(testDir));

    assertTrue(fs.exists(testDir));
    assertFalse(fs.isFile(testDir));

    Path parentDir = testDir.getParent();
    assertTrue(fs.exists(parentDir));
    assertFalse(fs.isFile(parentDir));

    Path grandparentDir = parentDir.getParent();
    assertTrue(fs.exists(grandparentDir));
    assertFalse(fs.isFile(grandparentDir));
  }

  public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
    Path testDir = path("/test/hadoop");
    assertFalse(fs.exists(testDir));
    assertTrue(fs.mkdirs(testDir));
    assertTrue(fs.exists(testDir));

    createFile(path("/test/hadoop/file"));

    Path testSubDir = path("/test/hadoop/file/subdir");
    try {
      fs.mkdirs(testSubDir);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    assertFalse(fs.exists(testSubDir));

    Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
    try {
      fs.mkdirs(testDeepSubDir);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    assertFalse(fs.exists(testDeepSubDir));
  }

  public void testGetFileStatusThrowsExceptionForNonExistentFile()
      throws Exception {
    try {
      fs.getFileStatus(path("/test/hadoop/file"));
      fail("Should throw FileNotFoundException");
    } catch (FileNotFoundException e) {
      // expected
    }
  }

  public void testListStatusReturnsNullForNonExistentFile() throws Exception {
    assertNull(fs.listStatus(path("/test/hadoop/file")));
  }

  public void testListStatus() throws Exception {
    Path[] testDirs = { path("/test/hadoop/a"),
                        path("/test/hadoop/b"),
                        path("/test/hadoop/c/1"), };
    assertFalse(fs.exists(testDirs[0]));

    for (Path path : testDirs) {
      assertTrue(fs.mkdirs(path));
    }

    FileStatus[] paths = fs.listStatus(path("/test"));
    assertEquals(1, paths.length);
    assertEquals(path("/test/hadoop"), paths[0].getPath());

    paths = fs.listStatus(path("/test/hadoop"));
    assertEquals(3, paths.length);
    assertEquals(path("/test/hadoop/a"), paths[0].getPath());
    assertEquals(path("/test/hadoop/b"), paths[1].getPath());
    assertEquals(path("/test/hadoop/c"), paths[2].getPath());

    paths = fs.listStatus(path("/test/hadoop/a"));
    assertEquals(0, paths.length);
  }

  public void testWriteReadAndDeleteEmptyFile() throws Exception {
    writeReadAndDelete(0);
  }

  public void testWriteReadAndDeleteHalfABlock() throws Exception {
    writeReadAndDelete(getBlockSize() / 2);
  }

  public void testWriteReadAndDeleteOneBlock() throws Exception {
    writeReadAndDelete(getBlockSize());
  }

  public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
    writeReadAndDelete(getBlockSize() + (getBlockSize() / 2));
  }

  public void testWriteReadAndDeleteTwoBlocks() throws Exception {
    writeReadAndDelete(getBlockSize() * 2);
  }

  private void writeReadAndDelete(int len) throws IOException {
    Path path = path("/test/hadoop/file");

    fs.mkdirs(path.getParent());

    FSDataOutputStream out = fs.create(path, false,
        fs.getConf().getInt("io.file.buffer.size", 4096),
        (short) 1, getBlockSize());
    out.write(data, 0, len);
    out.close();

    assertTrue("Exists", fs.exists(path));
    assertEquals("Length", len, fs.getFileStatus(path).getLen());

    FSDataInputStream in = fs.open(path);
    byte[] buf = new byte[len];
    in.readFully(0, buf);
    in.close();

    assertEquals(len, buf.length);
    for (int i = 0; i < buf.length; i++) {
      assertEquals("Position " + i, data[i], buf[i]);
    }

    assertTrue("Deleted", fs.delete(path, false));

    assertFalse("No longer exists", fs.exists(path));
  }

  public void testOverwrite() throws IOException {
    Path path = path("/test/hadoop/file");

    fs.mkdirs(path.getParent());

    createFile(path);

    assertTrue("Exists", fs.exists(path));
    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());

    try {
      fs.create(path, false);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // Expected
    }

    FSDataOutputStream out = fs.create(path, true);
    out.write(data, 0, data.length);
    out.close();

    assertTrue("Exists", fs.exists(path));
    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
  }

  public void testWriteInNonExistentDirectory() throws IOException {
    Path path = path("/test/hadoop/file");
    assertFalse("Parent doesn't exist", fs.exists(path.getParent()));
    createFile(path);

    assertTrue("Exists", fs.exists(path));
    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
    assertTrue("Parent exists", fs.exists(path.getParent()));
  }

  public void testDeleteNonExistentFile() throws IOException {
    Path path = path("/test/hadoop/file");
    assertFalse("Doesn't exist", fs.exists(path));
    assertFalse("No deletion", fs.delete(path, true));
  }

  public void testDeleteRecursively() throws IOException {
    Path dir = path("/test/hadoop");
    Path file = path("/test/hadoop/file");
    Path subdir = path("/test/hadoop/subdir");

    createFile(file);
    assertTrue("Created subdir", fs.mkdirs(subdir));

    assertTrue("File exists", fs.exists(file));
    assertTrue("Dir exists", fs.exists(dir));
    assertTrue("Subdir exists", fs.exists(subdir));

    try {
      fs.delete(dir, false);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    assertTrue("File still exists", fs.exists(file));
    assertTrue("Dir still exists", fs.exists(dir));
    assertTrue("Subdir still exists", fs.exists(subdir));

    assertTrue("Deleted", fs.delete(dir, true));
    assertFalse("File doesn't exist", fs.exists(file));
    assertFalse("Dir doesn't exist", fs.exists(dir));
    assertFalse("Subdir doesn't exist", fs.exists(subdir));
  }

  public void testDeleteEmptyDirectory() throws IOException {
    Path dir = path("/test/hadoop");
    assertTrue(fs.mkdirs(dir));
    assertTrue("Dir exists", fs.exists(dir));
    assertTrue("Deleted", fs.delete(dir, false));
    assertFalse("Dir doesn't exist", fs.exists(dir));
  }

  public void testRenameNonExistentPath() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/path");
    Path dst = path("/test/new/newpath");
    rename(src, dst, false, false, false);
  }

  public void testRenameFileMoveToNonExistentDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/file");
    createFile(src);
    Path dst = path("/test/new/newfile");
    rename(src, dst, false, true, false);
  }

  public void testRenameFileMoveToExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/file");
    createFile(src);
    Path dst = path("/test/new/newfile");
    fs.mkdirs(dst.getParent());
    rename(src, dst, true, false, true);
  }

  public void testRenameFileAsExistingFile() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/file");
    createFile(src);
    Path dst = path("/test/new/newfile");
    createFile(dst);
    rename(src, dst, false, true, true);
  }

  public void testRenameFileAsExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/file");
    createFile(src);
    Path dst = path("/test/new/newdir");
    fs.mkdirs(dst);
    rename(src, dst, true, false, true);
    assertTrue("Destination changed",
        fs.exists(path("/test/new/newdir/file")));
  }

  public void testRenameDirectoryMoveToNonExistentDirectory()
      throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/dir");
    fs.mkdirs(src);
    Path dst = path("/test/new/newdir");
    rename(src, dst, false, true, false);
  }

  public void testRenameDirectoryMoveToExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/dir");
    fs.mkdirs(src);
    createFile(path("/test/hadoop/dir/file1"));
    createFile(path("/test/hadoop/dir/subdir/file2"));

    Path dst = path("/test/new/newdir");
    fs.mkdirs(dst.getParent());
    rename(src, dst, true, false, true);

    assertFalse("Nested file1 exists",
        fs.exists(path("/test/hadoop/dir/file1")));
    assertFalse("Nested file2 exists",
        fs.exists(path("/test/hadoop/dir/subdir/file2")));
    assertTrue("Renamed nested file1 exists",
        fs.exists(path("/test/new/newdir/file1")));
    assertTrue("Renamed nested file2 exists",
        fs.exists(path("/test/new/newdir/subdir/file2")));
  }

  public void testRenameDirectoryAsExistingFile() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/dir");
    fs.mkdirs(src);
    Path dst = path("/test/new/newfile");
    createFile(dst);
    rename(src, dst, false, true, true);
  }

  public void testRenameDirectoryAsExistingDirectory() throws Exception {
    if (!renameSupported()) return;

    Path src = path("/test/hadoop/dir");
    fs.mkdirs(src);
    createFile(path("/test/hadoop/dir/file1"));
    createFile(path("/test/hadoop/dir/subdir/file2"));

    Path dst = path("/test/new/newdir");
    fs.mkdirs(dst);
    rename(src, dst, true, false, true);
    assertTrue("Destination changed",
        fs.exists(path("/test/new/newdir/dir")));
    assertFalse("Nested file1 exists",
        fs.exists(path("/test/hadoop/dir/file1")));
    assertFalse("Nested file2 exists",
        fs.exists(path("/test/hadoop/dir/subdir/file2")));
    assertTrue("Renamed nested file1 exists",
        fs.exists(path("/test/new/newdir/dir/file1")));
    assertTrue("Renamed nested file2 exists",
        fs.exists(path("/test/new/newdir/dir/subdir/file2")));
  }

  public void testInputStreamClosedTwice() throws IOException {
    // HADOOP-4760: according to Closeable#close(), closing an
    // already-closed stream should have no effect.
    Path src = path("/test/hadoop/file");
    createFile(src);
    FSDataInputStream in = fs.open(src);
    in.close();
    in.close();
  }

  public void testOutputStreamClosedTwice() throws IOException {
    // HADOOP-4760: according to Closeable#close(), closing an
    // already-closed stream should have no effect.
    Path src = path("/test/hadoop/file");
    FSDataOutputStream out = fs.create(src);
    out.writeChar('H'); // write some data
    out.close();
    out.close();
  }

  protected Path path(String pathString) {
    return new Path(pathString).makeQualified(fs);
  }

  protected void createFile(Path path) throws IOException {
    FSDataOutputStream out = fs.create(path);
    out.write(data, 0, data.length);
    out.close();
  }

  private void rename(Path src, Path dst, boolean renameSucceeded,
      boolean srcExists, boolean dstExists) throws IOException {
    assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
    assertEquals("Source exists", srcExists, fs.exists(src));
    assertEquals("Destination exists", dstExists, fs.exists(dst));
  }
}
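A sketch of the subclassing pattern the class javadoc above prescribes (the choice of the local file system is illustrative, not part of this commit):

package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;

public class LocalFSContractExample extends FileSystemContractBaseTest {
  @Override
  protected void setUp() throws Exception {
    // Run the whole contract suite against the local file system.
    fs = FileSystem.getLocal(new Configuration());
    super.setUp();
  }
}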
79
src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java
Normal file
@@ -0,0 +1,79 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;

public class TestChecksumFileSystem extends TestCase {
  public void testgetChecksumLength() throws Exception {
    assertEquals(8, ChecksumFileSystem.getChecksumLength(0L, 512));
    assertEquals(12, ChecksumFileSystem.getChecksumLength(1L, 512));
    assertEquals(12, ChecksumFileSystem.getChecksumLength(512L, 512));
    assertEquals(16, ChecksumFileSystem.getChecksumLength(513L, 512));
    assertEquals(16, ChecksumFileSystem.getChecksumLength(1023L, 512));
    assertEquals(16, ChecksumFileSystem.getChecksumLength(1024L, 512));
    assertEquals(408, ChecksumFileSystem.getChecksumLength(100L, 1));
    assertEquals(4000000000008L,
        ChecksumFileSystem.getChecksumLength(10000000000000L, 10));
  }

  public void testVerifyChecksum() throws Exception {
    String TEST_ROOT_DIR
        = System.getProperty("test.build.data", "build/test/data/work-dir/localfs");

    Configuration conf = new Configuration();
    LocalFileSystem localFs = FileSystem.getLocal(conf);
    Path testPath = new Path(TEST_ROOT_DIR, "testPath");
    Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
    FSDataOutputStream fout = localFs.create(testPath);
    fout.write("testing".getBytes());
    fout.close();

    fout = localFs.create(testPath11);
    fout.write("testing you".getBytes());
    fout.close();

    localFs.delete(localFs.getChecksumFile(testPath), true);
    assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));

    // copy over the wrong checksum file, so that verification must fail
    FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs,
        localFs.getChecksumFile(testPath), false, true, conf);
    assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));

    boolean errorRead = false;
    try {
      TestLocalFileSystem.readFile(localFs, testPath);
    } catch (ChecksumException ie) {
      errorRead = true;
    }
    assertTrue("error reading", errorRead);

    // now, with verification disabled, the read should succeed
    localFs.setVerifyChecksum(false);
    String str = TestLocalFileSystem.readFile(localFs, testPath);
    assertTrue("read", "testing".equals(str));
  }
}
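The values asserted in testgetChecksumLength() above are consistent with a simple closed form (an inference from the asserted numbers, not a quote of the implementation): an 8-byte header plus 4 bytes of CRC per bytesPerSum chunk, with a partial final chunk still costing 4 bytes.

public class ChecksumLengthSketch {
  // 8-byte header + 4-byte CRC per (possibly partial) chunk of bytesPerSum bytes.
  static long checksumLength(long fileSize, int bytesPerSum) {
    return 8 + 4 * ((fileSize + bytesPerSum - 1) / bytesPerSum);
  }

  public static void main(String[] args) {
    System.out.println(checksumLength(0L, 512));             // 8
    System.out.println(checksumLength(513L, 512));           // 16
    System.out.println(checksumLength(10000000000000L, 10)); // 4000000000008
  }
}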
63
src/test/org/apache/hadoop/fs/TestDFVariations.java
Normal file
@@ -0,0 +1,63 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import junit.framework.TestCase;

import java.io.File;
import java.io.IOException;
import java.util.EnumSet;

public class TestDFVariations extends TestCase {

  public static class XXDF extends DF {
    private final String osName;
    public XXDF(String osName) throws IOException {
      super(new File(System.getProperty("test.build.data", "/tmp")), 0L);
      this.osName = osName;
    }
    @Override
    public DF.OSType getOSType() {
      return DF.getOSType(osName);
    }
    @Override
    protected String[] getExecString() {
      switch(getOSType()) {
        case OS_TYPE_AIX:
          return new String[] { "echo", "IGNORE\n", "/dev/sda3",
            "453115160", "400077240", "11%", "18", "skip%", "/foo/bar", "\n" };
        default:
          return new String[] { "echo", "IGNORE\n", "/dev/sda3",
            "453115160", "53037920", "400077240", "11%", "/foo/bar", "\n" };
      }
    }
  }

  public void testOSParsing() throws Exception {
    for (DF.OSType ost : EnumSet.allOf(DF.OSType.class)) {
      XXDF df = new XXDF(ost.getId());
      assertEquals(ost.getId() + " total", 453115160 * 1024L, df.getCapacity());
      assertEquals(ost.getId() + " used", 53037920 * 1024L, df.getUsed());
      assertEquals(ost.getId() + " avail", 400077240 * 1024L, df.getAvailable());
      assertEquals(ost.getId() + " pcnt used", 11, df.getPercentUsed());
      assertEquals(ost.getId() + " mount", "/foo/bar", df.getMount());
    }
  }

}
95
src/test/org/apache/hadoop/fs/TestDU.java
Normal file
@@ -0,0 +1,95 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import junit.framework.TestCase;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Random;

/** This test makes sure that "du" is not run on each call to getUsed. */
public class TestDU extends TestCase {
  final static private File DU_DIR = new File(
      System.getProperty("test.build.data", "/tmp"), "dutmp");

  public void setUp() throws IOException {
    FileUtil.fullyDelete(DU_DIR);
    assertTrue(DU_DIR.mkdirs());
  }

  public void tearDown() throws IOException {
    FileUtil.fullyDelete(DU_DIR);
  }

  private void createFile(File newFile, int size) throws IOException {
    // write random data so that filesystems with compression enabled (e.g. ZFS)
    // can't compress the file
    Random random = new Random();
    byte[] data = new byte[size];
    random.nextBytes(data);

    newFile.createNewFile();
    RandomAccessFile file = new RandomAccessFile(newFile, "rws");

    file.write(data);

    file.getFD().sync();
    file.close();
  }

  /**
   * Verify that du returns the expected used space for a file.
   * We assume here that if a file system creates a file whose size
   * is a multiple of the block size of that file system,
   * then the used size reported for the file will be exactly that size.
   * This is true for most file systems.
   *
   * @throws IOException
   * @throws InterruptedException
   */
  public void testDU() throws IOException, InterruptedException {
    int writtenSize = 32 * 1024;  // writing 32K
    File file = new File(DU_DIR, "data");
    createFile(file, writtenSize);

    Thread.sleep(5000); // let the metadata updater catch up

    DU du = new DU(file, 10000);
    du.start();
    long duSize = du.getUsed();
    du.shutdown();

    assertEquals(writtenSize, duSize);

    // test with a 0 interval; this will not launch the refresh thread
    du = new DU(file, 0);
    du.start();
    duSize = du.getUsed();
    du.shutdown();

    assertEquals(writtenSize, duSize);

    // test without starting the refresh thread at all
    du = new DU(file, 10000);
    duSize = du.getUsed();

    assertEquals(writtenSize, duSize);
  }
}
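The size arithmetic the javadoc above relies on, spelled out (the 4K block size is only an example, not something the test fixes): a 32K file is a whole multiple of any common block size that divides 32K, so du can report exactly 32768 bytes.

public class DuSizeSketch {
  public static void main(String[] args) {
    int writtenSize = 32 * 1024;      // what the test writes
    int exampleBlockSize = 4 * 1024;  // illustrative file system block size
    // used space = occupied blocks * block size; exact when size is a multiple
    long blocks = (writtenSize + exampleBlockSize - 1) / exampleBlockSize;
    System.out.println(blocks * exampleBlockSize == writtenSize); // true
  }
}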
139
src/test/org/apache/hadoop/fs/TestGetFileBlockLocations.java
Normal file
@@ -0,0 +1,139 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Random;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;

/**
 * Testing the correctness of FileSystem.getFileBlockLocations.
 */
public class TestGetFileBlockLocations extends TestCase {
  private static String TEST_ROOT_DIR =
      System.getProperty("test.build.data", "/tmp/testGetFileBlockLocations");
  private static final int FileLength = 4 * 1024 * 1024; // 4MB
  private Configuration conf;
  private Path path;
  private FileSystem fs;
  private Random random;

  /**
   * @see TestCase#setUp()
   */
  @Override
  protected void setUp() throws IOException {
    conf = new Configuration();
    Path rootPath = new Path(TEST_ROOT_DIR);
    path = new Path(rootPath, "TestGetFileBlockLocations");
    fs = rootPath.getFileSystem(conf);
    FSDataOutputStream fsdos = fs.create(path, true);
    byte[] buffer = new byte[1024];
    while (fsdos.getPos() < FileLength) {
      fsdos.write(buffer);
    }
    fsdos.close();
    random = new Random(System.nanoTime());
  }

  private void oneTest(int offBegin, int offEnd, FileStatus status)
      throws IOException {
    if (offBegin > offEnd) {
      int tmp = offBegin;
      offBegin = offEnd;
      offEnd = tmp;
    }
    BlockLocation[] locations =
        fs.getFileBlockLocations(status, offBegin, offEnd - offBegin);
    if (offBegin < status.getLen()) {
      Arrays.sort(locations, new Comparator<BlockLocation>() {

        @Override
        public int compare(BlockLocation arg0, BlockLocation arg1) {
          long cmprv = arg0.getOffset() - arg1.getOffset();
          if (cmprv < 0) return -1;
          if (cmprv > 0) return 1;
          cmprv = arg0.getLength() - arg1.getLength();
          if (cmprv < 0) return -1;
          if (cmprv > 0) return 1;
          return 0;
        }

      });
      offBegin = (int) Math.min(offBegin, status.getLen() - 1);
      offEnd = (int) Math.min(offEnd, status.getLen());
      BlockLocation first = locations[0];
      BlockLocation last = locations[locations.length - 1];
      assertTrue(first.getOffset() <= offBegin);
      assertTrue(offEnd <= last.getOffset() + last.getLength());
    } else {
      assertTrue(locations.length == 0);
    }
  }

  /**
   * @see TestCase#tearDown()
   */
  @Override
  protected void tearDown() throws IOException {
    fs.delete(path, true);
    fs.close();
  }

  public void testFailureNegativeParameters() throws IOException {
    FileStatus status = fs.getFileStatus(path);
    try {
      fs.getFileBlockLocations(status, -1, 100);
      fail("Expected an exception to be thrown");
    } catch (IllegalArgumentException e) {
      // expected
    }

    try {
      fs.getFileBlockLocations(status, 100, -1);
      fail("Expected an exception to be thrown");
    } catch (IllegalArgumentException e) {
      // expected
    }
  }

  public void testGetFileBlockLocations1() throws IOException {
    FileStatus status = fs.getFileStatus(path);
    oneTest(0, (int) status.getLen(), status);
    oneTest(0, (int) status.getLen() * 2, status);
    oneTest((int) status.getLen() * 2, (int) status.getLen() * 4, status);
    oneTest((int) status.getLen() / 2, (int) status.getLen() * 3, status);
    for (int i = 0; i < 10; ++i) {
      oneTest((int) status.getLen() * i / 10, (int) status.getLen() * (i + 1)
          / 10, status);
    }
  }

  public void testGetFileBlockLocations2() throws IOException {
    FileStatus status = fs.getFileStatus(path);
    for (int i = 0; i < 1000; ++i) {
      int offBegin = random.nextInt((int) (2 * status.getLen()));
      int offEnd = random.nextInt((int) (2 * status.getLen()));
      oneTest(offBegin, offEnd, status);
    }
  }
}
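For reference, a plain client-side use of the API under test (the path below is illustrative; the getFileBlockLocations signature is the one the test calls):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path("/tmp/somefile"); // illustrative path
    FileStatus status = fs.getFileStatus(p);
    // Ask for the locations covering the whole file.
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation loc : locations) {
      System.out.println(loc.getOffset() + "+" + loc.getLength());
    }
  }
}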
62
src/test/org/apache/hadoop/fs/TestGlobExpander.java
Normal file
@@ -0,0 +1,62 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.IOException;
import java.util.List;

import junit.framework.TestCase;

public class TestGlobExpander extends TestCase {

  public void testExpansionIsIdentical() throws IOException {
    checkExpansionIsIdentical("");
    checkExpansionIsIdentical("/}");
    checkExpansionIsIdentical("/}{a,b}");
    checkExpansionIsIdentical("{/");
    checkExpansionIsIdentical("{a}");
    checkExpansionIsIdentical("{a,b}/{b,c}");
    checkExpansionIsIdentical("p\\{a/b,c/d\\}s");
    checkExpansionIsIdentical("p{a\\/b,c\\/d}s");
  }

  public void testExpansion() throws IOException {
    checkExpansion("{a/b}", "a/b");
    checkExpansion("/}{a/b}", "/}a/b");
    checkExpansion("p{a/b,c/d}s", "pa/bs", "pc/ds");
    checkExpansion("{a/b,c/d,{e,f}}", "a/b", "c/d", "{e,f}");
    checkExpansion("{a/b,c/d}{e,f}", "a/b{e,f}", "c/d{e,f}");
    checkExpansion("{a,b}/{b,{c/d,e/f}}", "{a,b}/b", "{a,b}/c/d", "{a,b}/e/f");
    checkExpansion("{a,b}/{c/\\d}", "{a,b}/c/d");
  }

  private void checkExpansionIsIdentical(String filePattern) throws IOException {
    checkExpansion(filePattern, filePattern);
  }

  private void checkExpansion(String filePattern, String... expectedExpansions)
      throws IOException {
    List<String> actualExpansions = GlobExpander.expand(filePattern);
    assertEquals("Different number of expansions", expectedExpansions.length,
        actualExpansions.size());
    for (int i = 0; i < expectedExpansions.length; i++) {
      assertEquals("Expansion of " + filePattern, expectedExpansions[i],
          actualExpansions.get(i));
    }
  }
}
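Editor's note: a minimal usage sketch (not part of this commit). GlobExpander is package-private, so the sketch assumes it is compiled inside org.apache.hadoop.fs; expand() splits out only the curly-brace alternations that contain a '/'.

package org.apache.hadoop.fs;

import java.io.IOException;
import java.util.List;

public class GlobExpanderSketch {
  public static void main(String[] args) throws IOException {
    // Expands to [pa/bs, pc/ds]: alternations spanning '/' are flattened
    // so each result can be resolved one path component at a time.
    List<String> expanded = GlobExpander.expand("p{a/b,c/d}s");
    for (String p : expanded) {
      System.out.println(p);
    }
  }
}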
211 src/test/org/apache/hadoop/fs/TestLocalDirAllocator.java Normal file
@@ -0,0 +1,211 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;

import junit.framework.TestCase;

/** This test verifies that LocalDirAllocator works correctly;
 * Every test case uses different buffer dirs to
 * enforce the AllocatorPerContext initialization.
 * This test does not run on Cygwin because under Cygwin
 * a directory can be created in a read-only directory,
 * which breaks this test.
 */
public class TestLocalDirAllocator extends TestCase {
  final static private Configuration conf = new Configuration();
  final static private String BUFFER_DIR_ROOT = "build/test/temp";
  final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT);
  final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT);
  final static private String BUFFER_DIR[] = new String[] {
    BUFFER_DIR_ROOT+"/tmp0", BUFFER_DIR_ROOT+"/tmp1", BUFFER_DIR_ROOT+"/tmp2",
    BUFFER_DIR_ROOT+"/tmp3", BUFFER_DIR_ROOT+"/tmp4", BUFFER_DIR_ROOT+"/tmp5",
    BUFFER_DIR_ROOT+"/tmp6"};
  final static private Path BUFFER_PATH[] = new Path[] {
    new Path(BUFFER_DIR[0]), new Path(BUFFER_DIR[1]), new Path(BUFFER_DIR[2]),
    new Path(BUFFER_DIR[3]), new Path(BUFFER_DIR[4]), new Path(BUFFER_DIR[5]),
    new Path(BUFFER_DIR[6])};
  final static private String CONTEXT = "dfs.client.buffer.dir";
  final static private String FILENAME = "block";
  final static private LocalDirAllocator dirAllocator =
    new LocalDirAllocator(CONTEXT);
  static LocalFileSystem localFs;
  final static private boolean isWindows =
    System.getProperty("os.name").startsWith("Windows");
  final static int SMALL_FILE_SIZE = 100;
  static {
    try {
      localFs = FileSystem.getLocal(conf);
      rmBufferDirs();
    } catch(IOException e) {
      System.out.println(e.getMessage());
      e.printStackTrace();
      System.exit(-1);
    }
  }

  private static void rmBufferDirs() throws IOException {
    assertTrue(!localFs.exists(BUFFER_PATH_ROOT) ||
        localFs.delete(BUFFER_PATH_ROOT, true));
  }

  private void validateTempDirCreation(int i) throws IOException {
    File result = createTempFile(SMALL_FILE_SIZE);
    assertTrue("Checking for " + BUFFER_DIR[i] + " in " + result + " - FAILED!",
        result.getPath().startsWith(new File(BUFFER_DIR[i], FILENAME).getPath()));
  }

  private File createTempFile() throws IOException {
    File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
    result.delete();
    return result;
  }

  private File createTempFile(long size) throws IOException {
    File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
    result.delete();
    return result;
  }

  /** Two buffer dirs. The first dir does not exist & is on a read-only disk;
   * the second dir exists & is RW.
   * @throws Exception
   */
  public void test0() throws Exception {
    if (isWindows) return;
    try {
      conf.set(CONTEXT, BUFFER_DIR[0]+","+BUFFER_DIR[1]);
      assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
      BUFFER_ROOT.setReadOnly();
      validateTempDirCreation(1);
      validateTempDirCreation(1);
    } finally {
      Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
      rmBufferDirs();
    }
  }

  /** Two buffer dirs. The first dir exists & is on a read-only disk;
   * the second dir exists & is RW.
   * @throws Exception
   */
  public void test1() throws Exception {
    if (isWindows) return;
    try {
      conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
      assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
      BUFFER_ROOT.setReadOnly();
      validateTempDirCreation(2);
      validateTempDirCreation(2);
    } finally {
      Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
      rmBufferDirs();
    }
  }

  /** Two buffer dirs. Neither dir exists, but both are on a RW disk.
   * Check that tmp dirs are allocated in round-robin fashion.
   */
  public void test2() throws Exception {
    if (isWindows) return;
    try {
      conf.set(CONTEXT, BUFFER_DIR[2]+","+BUFFER_DIR[3]);

      // create the first file, and then figure the round-robin sequence
      createTempFile(SMALL_FILE_SIZE);
      int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3;
      int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;

      // check if tmp dirs are allocated in a round-robin manner
      validateTempDirCreation(firstDirIdx);
      validateTempDirCreation(secondDirIdx);
      validateTempDirCreation(firstDirIdx);
    } finally {
      rmBufferDirs();
    }
  }

  /** Two buffer dirs. Both exist on a R/W disk.
   * Later the second one becomes read-only.
   * @throws Exception
   */
  public void test3() throws Exception {
    if (isWindows) return;
    try {
      conf.set(CONTEXT, BUFFER_DIR[3]+","+BUFFER_DIR[4]);
      assertTrue(localFs.mkdirs(BUFFER_PATH[3]));
      assertTrue(localFs.mkdirs(BUFFER_PATH[4]));

      // create the first file with size, and then figure the round-robin sequence
      createTempFile(SMALL_FILE_SIZE);

      int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
      validateTempDirCreation(nextDirIdx);

      // change buffer directory 2 to be read only
      new File(BUFFER_DIR[4]).setReadOnly();
      validateTempDirCreation(3);
      validateTempDirCreation(3);
    } finally {
      rmBufferDirs();
    }
  }

  /**
   * Two buffer dirs, on a read-write disk.
   *
   * Try to create a whole bunch of files.
   * Verify that they do indeed all get created where they should.
   *
   * Would ideally check statistical properties of distribution, but
   * we don't have the nerve to risk false-positives here.
   *
   * @throws Exception
   */
  static final int TRIALS = 100;
  public void test4() throws Exception {
    if (isWindows) return;
    try {
      conf.set(CONTEXT, BUFFER_DIR[5]+","+BUFFER_DIR[6]);
      assertTrue(localFs.mkdirs(BUFFER_PATH[5]));
      assertTrue(localFs.mkdirs(BUFFER_PATH[6]));

      int inDir5 = 0, inDir6 = 0;
      for (int i = 0; i < TRIALS; ++i) {
        File result = createTempFile();
        if (result.getPath().startsWith(new File(BUFFER_DIR[5], FILENAME).getPath())) {
          inDir5++;
        } else if (result.getPath().startsWith(new File(BUFFER_DIR[6], FILENAME).getPath())) {
          inDir6++;
        }
        result.delete();
      }

      assertTrue(inDir5 + inDir6 == TRIALS);

    } finally {
      rmBufferDirs();
    }
  }

}
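Editor's note: a minimal sketch (not part of this commit) of the allocation pattern these tests exercise. The context key and directory list below are illustrative assumptions; the allocator round-robins over the comma-separated list bound to its context key and skips directories it cannot write to.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

public class DirAllocatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical directory list; in the tests above it points at
    // build/test/temp/tmpN subdirectories.
    conf.set("dfs.client.buffer.dir", "/tmp/buf0,/tmp/buf1");
    LocalDirAllocator allocator = new LocalDirAllocator("dfs.client.buffer.dir");
    // Ask for a temp file with 100 bytes of expected content.
    File tmp = allocator.createTmpFileForWrite("block", 100, conf);
    System.out.println("allocated " + tmp.getPath());
  }
}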
156 src/test/org/apache/hadoop/fs/TestLocalFileSystem.java Normal file
@@ -0,0 +1,156 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;
import java.io.*;
import junit.framework.*;

/**
 * This class tests the local file system via the FileSystem abstraction.
 */
public class TestLocalFileSystem extends TestCase {
  private static String TEST_ROOT_DIR
    = System.getProperty("test.build.data","build/test/data/work-dir/localfs");

  static void writeFile(FileSystem fs, Path name) throws IOException {
    FSDataOutputStream stm = fs.create(name);
    stm.writeBytes("42\n");
    stm.close();
  }

  static String readFile(FileSystem fs, Path name) throws IOException {
    byte[] b = new byte[1024];
    int offset = 0;
    FSDataInputStream in = fs.open(name);
    for (int remaining, n;
        (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
        offset += n);
    in.close();

    String s = new String(b, 0, offset);
    System.out.println("s=" + s);
    return s;
  }

  private void cleanupFile(FileSystem fs, Path name) throws IOException {
    assertTrue(fs.exists(name));
    fs.delete(name, true);
    assertTrue(!fs.exists(name));
  }

  /**
   * Test the capability of setting the working directory.
   */
  public void testWorkingDirectory() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fileSys = FileSystem.getLocal(conf);
    Path origDir = fileSys.getWorkingDirectory();
    Path subdir = new Path(TEST_ROOT_DIR, "new");
    try {
      // make sure it doesn't already exist
      assertTrue(!fileSys.exists(subdir));
      // make it and check for it
      assertTrue(fileSys.mkdirs(subdir));
      assertTrue(fileSys.isDirectory(subdir));

      fileSys.setWorkingDirectory(subdir);

      // create a directory and check for it
      Path dir1 = new Path("dir1");
      assertTrue(fileSys.mkdirs(dir1));
      assertTrue(fileSys.isDirectory(dir1));

      // delete the directory and make sure it went away
      fileSys.delete(dir1, true);
      assertTrue(!fileSys.exists(dir1));

      // create files and manipulate them.
      Path file1 = new Path("file1");
      Path file2 = new Path("sub/file2");
      writeFile(fileSys, file1);
      fileSys.copyFromLocalFile(file1, file2);
      assertTrue(fileSys.exists(file1));
      assertTrue(fileSys.isFile(file1));
      cleanupFile(fileSys, file2);
      fileSys.copyToLocalFile(file1, file2);
      cleanupFile(fileSys, file2);

      // try a rename
      fileSys.rename(file1, file2);
      assertTrue(!fileSys.exists(file1));
      assertTrue(fileSys.exists(file2));
      fileSys.rename(file2, file1);

      // try reading a file
      InputStream stm = fileSys.open(file1);
      byte[] buffer = new byte[3];
      int bytesRead = stm.read(buffer, 0, 3);
      assertEquals("42\n", new String(buffer, 0, bytesRead));
      stm.close();
    } finally {
      fileSys.setWorkingDirectory(origDir);
      fileSys.delete(subdir, true);
    }
  }

  public void testCopy() throws IOException {
    Configuration conf = new Configuration();
    LocalFileSystem fs = FileSystem.getLocal(conf);
    Path src = new Path(TEST_ROOT_DIR, "dingo");
    Path dst = new Path(TEST_ROOT_DIR, "yak");
    writeFile(fs, src);
    assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf));
    assertTrue(!fs.exists(src) && fs.exists(dst));
    assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
    assertTrue(fs.exists(src) && fs.exists(dst));
    assertTrue(FileUtil.copy(fs, src, fs, dst, true, true, conf));
    assertTrue(!fs.exists(src) && fs.exists(dst));
    fs.mkdirs(src);
    assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
    Path tmp = new Path(src, dst.getName());
    assertTrue(fs.exists(tmp) && fs.exists(dst));
    assertTrue(FileUtil.copy(fs, dst, fs, src, false, true, conf));
    assertTrue(fs.delete(tmp, true));
    fs.mkdirs(tmp);
    try {
      FileUtil.copy(fs, dst, fs, src, true, true, conf);
      fail("Failed to detect existing dir");
    } catch (IOException e) { }
  }

  public void testHomeDirectory() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fileSys = FileSystem.getLocal(conf);
    Path home = new Path(System.getProperty("user.home"))
      .makeQualified(fileSys);
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);
  }

  public void testPathEscapes() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path(TEST_ROOT_DIR, "foo%bar");
    writeFile(fs, path);
    FileStatus status = fs.getFileStatus(path);
    assertEquals(path.makeQualified(fs), status.getPath());
    cleanupFile(fs, path);
  }
}
157 src/test/org/apache/hadoop/fs/TestLocalFileSystemPermission.java Normal file
@@ -0,0 +1,157 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell;

import java.io.*;
import java.util.*;

import junit.framework.*;

/**
 * This class tests the local file system via the FileSystem abstraction.
 */
public class TestLocalFileSystemPermission extends TestCase {
  static final String TEST_PATH_PREFIX = new Path(System.getProperty(
      "test.build.data", "/tmp")).toString().replace(' ', '_')
      + "/" + TestLocalFileSystemPermission.class.getSimpleName() + "_";

  {
    try {
      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
          .setLevel(org.apache.log4j.Level.DEBUG);
    }
    catch(Exception e) {
      System.out.println("Cannot change log level\n"
          + StringUtils.stringifyException(e));
    }
  }

  private Path writeFile(FileSystem fs, String name) throws IOException {
    Path f = new Path(TEST_PATH_PREFIX + name);
    FSDataOutputStream stm = fs.create(f);
    stm.writeBytes("42\n");
    stm.close();
    return f;
  }

  private void cleanupFile(FileSystem fs, Path name) throws IOException {
    assertTrue(fs.exists(name));
    fs.delete(name, true);
    assertTrue(!fs.exists(name));
  }

  /** Test LocalFileSystem.setPermission */
  public void testLocalFSsetPermission() throws IOException {
    if (Path.WINDOWS) {
      System.out.println("Cannot run test for Windows");
      return;
    }
    Configuration conf = new Configuration();
    LocalFileSystem localfs = FileSystem.getLocal(conf);
    String filename = "foo";
    Path f = writeFile(localfs, filename);
    try {
      System.out.println(filename + ": " + getPermission(localfs, f));
    }
    catch(Exception e) {
      System.out.println(StringUtils.stringifyException(e));
      System.out.println("Cannot run test");
      return;
    }

    try {
      // create files and manipulate them.
      FsPermission all = new FsPermission((short)0777);
      FsPermission none = new FsPermission((short)0);

      localfs.setPermission(f, none);
      assertEquals(none, getPermission(localfs, f));

      localfs.setPermission(f, all);
      assertEquals(all, getPermission(localfs, f));
    }
    finally {cleanupFile(localfs, f);}
  }

  FsPermission getPermission(LocalFileSystem fs, Path p) throws IOException {
    return fs.getFileStatus(p).getPermission();
  }

  /** Test LocalFileSystem.setOwner */
  public void testLocalFSsetOwner() throws IOException {
    if (Path.WINDOWS) {
      System.out.println("Cannot run test for Windows");
      return;
    }

    Configuration conf = new Configuration();
    LocalFileSystem localfs = FileSystem.getLocal(conf);
    String filename = "bar";
    Path f = writeFile(localfs, filename);
    List<String> groups = null;
    try {
      groups = getGroups();
      System.out.println(filename + ": " + getPermission(localfs, f));
    }
    catch(IOException e) {
      System.out.println(StringUtils.stringifyException(e));
      System.out.println("Cannot run test");
      return;
    }
    if (groups == null || groups.size() < 1) {
      System.out.println("Cannot run test: need at least one group. groups="
          + groups);
      return;
    }

    // create files and manipulate them.
    try {
      String g0 = groups.get(0);
      localfs.setOwner(f, null, g0);
      assertEquals(g0, getGroup(localfs, f));

      if (groups.size() > 1) {
        String g1 = groups.get(1);
        localfs.setOwner(f, null, g1);
        assertEquals(g1, getGroup(localfs, f));
      } else {
        System.out.println("Not testing changing the group since user " +
            "belongs to only one group.");
      }
    }
    finally {cleanupFile(localfs, f);}
  }

  static List<String> getGroups() throws IOException {
    List<String> a = new ArrayList<String>();
    String s = Shell.execCommand(Shell.getGROUPS_COMMAND());
    for (StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
      a.add(t.nextToken());
    }
    return a;
  }

  String getGroup(LocalFileSystem fs, Path p) throws IOException {
    return fs.getFileStatus(p).getGroup();
  }
}
152 src/test/org/apache/hadoop/fs/TestPath.java Normal file
@@ -0,0 +1,152 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.util.*;
import junit.framework.TestCase;

public class TestPath extends TestCase {
  public void testToString() {
    toStringTest("/");
    toStringTest("/foo");
    toStringTest("/foo/bar");
    toStringTest("foo");
    toStringTest("foo/bar");
    boolean emptyException = false;
    try {
      toStringTest("");
    } catch (IllegalArgumentException e) {
      // expect to receive an IllegalArgumentException
      emptyException = true;
    }
    assertTrue(emptyException);
    if (Path.WINDOWS) {
      toStringTest("c:");
      toStringTest("c:/");
      toStringTest("c:foo");
      toStringTest("c:foo/bar");
      toStringTest("c:foo/bar");
      toStringTest("c:/foo/bar");
    }
  }

  private void toStringTest(String pathString) {
    assertEquals(pathString, new Path(pathString).toString());
  }

  public void testNormalize() {
    assertEquals("/", new Path("//").toString());
    assertEquals("/foo", new Path("/foo/").toString());
    assertEquals("/foo", new Path("/foo/").toString());
    assertEquals("foo", new Path("foo/").toString());
    assertEquals("foo", new Path("foo//").toString());
    assertEquals("foo/bar", new Path("foo//bar").toString());
    if (Path.WINDOWS) {
      assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
    }
  }

  public void testIsAbsolute() {
    assertTrue(new Path("/").isAbsolute());
    assertTrue(new Path("/foo").isAbsolute());
    assertFalse(new Path("foo").isAbsolute());
    assertFalse(new Path("foo/bar").isAbsolute());
    assertFalse(new Path(".").isAbsolute());
    if (Path.WINDOWS) {
      assertTrue(new Path("c:/a/b").isAbsolute());
      assertFalse(new Path("c:a/b").isAbsolute());
    }
  }

  public void testParent() {
    assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
    assertEquals(new Path("foo"), new Path("foo/bar").getParent());
    assertEquals(new Path("/"), new Path("/foo").getParent());
    if (Path.WINDOWS) {
      assertEquals(new Path("c:/"), new Path("c:/foo").getParent());
    }
  }

  public void testChild() {
    assertEquals(new Path("."), new Path(".", "."));
    assertEquals(new Path("/"), new Path("/", "."));
    assertEquals(new Path("/"), new Path(".", "/"));
    assertEquals(new Path("/foo"), new Path("/", "foo"));
    assertEquals(new Path("/foo/bar"), new Path("/foo", "bar"));
    assertEquals(new Path("/foo/bar/baz"), new Path("/foo/bar", "baz"));
    assertEquals(new Path("/foo/bar/baz"), new Path("/foo", "bar/baz"));
    assertEquals(new Path("foo"), new Path(".", "foo"));
    assertEquals(new Path("foo/bar"), new Path("foo", "bar"));
    assertEquals(new Path("foo/bar/baz"), new Path("foo", "bar/baz"));
    assertEquals(new Path("foo/bar/baz"), new Path("foo/bar", "baz"));
    assertEquals(new Path("/foo"), new Path("/bar", "/foo"));
    if (Path.WINDOWS) {
      assertEquals(new Path("c:/foo"), new Path("/bar", "c:/foo"));
      assertEquals(new Path("c:/foo"), new Path("d:/bar", "c:/foo"));
    }
  }

  public void testEquals() {
    assertFalse(new Path("/").equals(new Path("/foo")));
  }

  public void testDots() {
    // Test Path(String)
    assertEquals(new Path("/foo/bar/baz").toString(), "/foo/bar/baz");
    assertEquals(new Path("/foo/bar", ".").toString(), "/foo/bar");
    assertEquals(new Path("/foo/bar/../baz").toString(), "/foo/baz");
    assertEquals(new Path("/foo/bar/./baz").toString(), "/foo/bar/baz");
    assertEquals(new Path("/foo/bar/baz/../../fud").toString(), "/foo/fud");
    assertEquals(new Path("/foo/bar/baz/.././../fud").toString(), "/foo/fud");
    assertEquals(new Path("../../foo/bar").toString(), "../../foo/bar");
    assertEquals(new Path(".././../foo/bar").toString(), "../../foo/bar");
    assertEquals(new Path("./foo/bar/baz").toString(), "foo/bar/baz");
    assertEquals(new Path("/foo/bar/../../baz/boo").toString(), "/baz/boo");
    assertEquals(new Path("foo/bar/").toString(), "foo/bar");
    assertEquals(new Path("foo/bar/../baz").toString(), "foo/baz");
    assertEquals(new Path("foo/bar/../../baz/boo").toString(), "baz/boo");

    // Test Path(Path,Path)
    assertEquals(new Path("/foo/bar", "baz/boo").toString(), "/foo/bar/baz/boo");
    assertEquals(new Path("foo/bar/","baz/bud").toString(), "foo/bar/baz/bud");

    assertEquals(new Path("/foo/bar","../../boo/bud").toString(), "/boo/bud");
    assertEquals(new Path("foo/bar","../../boo/bud").toString(), "boo/bud");
    assertEquals(new Path(".","boo/bud").toString(), "boo/bud");

    assertEquals(new Path("/foo/bar/baz","../../boo/bud").toString(), "/foo/boo/bud");
    assertEquals(new Path("foo/bar/baz","../../boo/bud").toString(), "foo/boo/bud");

    assertEquals(new Path("../../","../../boo/bud").toString(), "../../../../boo/bud");
    assertEquals(new Path("../../foo","../../../boo/bud").toString(), "../../../../boo/bud");
    assertEquals(new Path("../../foo/bar","../boo/bud").toString(), "../../foo/boo/bud");

    assertEquals(new Path("foo/bar/baz","../../..").toString(), "");
    assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../..");
  }

  public void testScheme() throws java.io.IOException {
    assertEquals("foo:/bar", new Path("foo:/","/bar").toString());
    assertEquals("foo://bar/baz", new Path("foo://bar/","/baz").toString());
  }

}
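Editor's note: a minimal sketch (not part of this commit) of the resolution rules the TestPath cases assert; the two-argument Path constructor resolves the child against the parent and normalizes "." and "..". The expected outputs in the comments come directly from the tests above.

import org.apache.hadoop.fs.Path;

public class PathSketch {
  public static void main(String[] args) {
    System.out.println(new Path("/foo/bar", "../baz")); // prints /foo/baz
    System.out.println(new Path("foo//bar"));           // doubled slash collapses: foo/bar
    System.out.println(new Path("/bar", "/foo"));       // absolute child wins: /foo
  }
}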
313 src/test/org/apache/hadoop/fs/TestTrash.java Normal file
@@ -0,0 +1,313 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import junit.framework.TestCase;
import java.io.File;
import java.io.IOException;
import java.io.DataOutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.LocalFileSystem;

/**
 * This class tests commands from Trash.
 */
public class TestTrash extends TestCase {

  private final static Path TEST_DIR =
    new Path(new File(System.getProperty("test.build.data","/tmp")
        ).toURI().toString().replace(' ', '+'), "testTrash");

  protected static Path writeFile(FileSystem fs, Path f) throws IOException {
    DataOutputStream out = fs.create(f);
    out.writeBytes("dhruba: " + f);
    out.close();
    assertTrue(fs.exists(f));
    return f;
  }

  protected static Path mkdir(FileSystem fs, Path p) throws IOException {
    assertTrue(fs.mkdirs(p));
    assertTrue(fs.exists(p));
    assertTrue(fs.getFileStatus(p).isDir());
    return p;
  }

  // check that the specified file is in Trash
  protected static void checkTrash(FileSystem fs, Path trashRoot,
      Path path) throws IOException {
    Path p = new Path(trashRoot+"/"+ path.toUri().getPath());
    assertTrue(fs.exists(p));
  }

  // check that the specified file is not in Trash
  static void checkNotInTrash(FileSystem fs, Path trashRoot, String pathname)
      throws IOException {
    Path p = new Path(trashRoot+"/"+ new Path(pathname).getName());
    assertTrue(!fs.exists(p));
  }

  protected static void trashShell(final FileSystem fs, final Path base)
      throws IOException {
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10"); // 10 minutes
    conf.set("fs.default.name", fs.getUri().toString());
    FsShell shell = new FsShell();
    shell.setConf(conf);
    Path trashRoot = null;

    // First create a new directory with mkdirs
    Path myPath = new Path(base, "test/mkdirs");
    mkdir(fs, myPath);

    // Second, create a file in that directory.
    Path myFile = new Path(base, "test/mkdirs/myFile");
    writeFile(fs, myFile);

    // Verify that expunge without a Trash directory
    // won't throw an Exception
    {
      String[] args = new String[1];
      args[0] = "-expunge";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
    }

    // Verify that we succeed in removing the file we created.
    // This should go into Trash.
    {
      String[] args = new String[2];
      args[0] = "-rm";
      args[1] = myFile.toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);

      trashRoot = shell.getCurrentTrashDir();
      checkTrash(fs, trashRoot, myFile);
    }

    // Verify that we can recreate the file
    writeFile(fs, myFile);

    // Verify that we succeed in removing the file we re-created
    {
      String[] args = new String[2];
      args[0] = "-rm";
      args[1] = new Path(base, "test/mkdirs/myFile").toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
    }

    // Verify that we can recreate the file
    writeFile(fs, myFile);

    // Verify that we succeed in removing the whole directory
    // along with the file inside it.
    {
      String[] args = new String[2];
      args[0] = "-rmr";
      args[1] = new Path(base, "test/mkdirs").toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
    }

    // recreate directory
    mkdir(fs, myPath);

    // Verify that we succeed in removing the whole directory
    {
      String[] args = new String[2];
      args[0] = "-rmr";
      args[1] = new Path(base, "test/mkdirs").toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
    }

    // Check that we can delete a file from the trash
    {
      Path toErase = new Path(trashRoot, "toErase");
      int retVal = -1;
      writeFile(fs, toErase);
      try {
        retVal = shell.run(new String[] {"-rm", toErase.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(retVal == 0);
      checkNotInTrash(fs, trashRoot, toErase.toString());
      checkNotInTrash(fs, trashRoot, toErase.toString()+".1");
    }

    // simulate Trash removal
    {
      String[] args = new String[1];
      args[0] = "-expunge";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
    }

    // verify that after expunging the Trash, it really goes away
    checkNotInTrash(fs, trashRoot, new Path(base, "test/mkdirs/myFile").toString());

    // recreate directory and file
    mkdir(fs, myPath);
    writeFile(fs, myFile);

    // remove file first, then remove directory
    {
      String[] args = new String[2];
      args[0] = "-rm";
      args[1] = myFile.toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
      checkTrash(fs, trashRoot, myFile);

      args = new String[2];
      args[0] = "-rmr";
      args[1] = myPath.toString();
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
      checkTrash(fs, trashRoot, myPath);
    }

    // attempt to remove parent of trash
    {
      String[] args = new String[2];
      args[0] = "-rmr";
      args[1] = trashRoot.getParent().getParent().toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from Trash.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == -1);
      assertTrue(fs.exists(trashRoot));
    }
  }

  public static void trashNonDefaultFS(Configuration conf) throws IOException {
    conf.set("fs.trash.interval", "10"); // 10 minutes
    // attempt non-default FileSystem trash
    {
      final FileSystem lfs = FileSystem.getLocal(conf);
      Path p = TEST_DIR;
      Path f = new Path(p, "foo/bar");
      if (lfs.exists(p)) {
        lfs.delete(p, true);
      }
      try {
        f = writeFile(lfs, f);

        FileSystem.closeAll();
        FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
        Trash lTrash = new Trash(localFs, conf);
        lTrash.moveToTrash(f.getParent());
        checkTrash(localFs, lTrash.getCurrentTrashDir(), f);
      } finally {
        if (lfs.exists(p)) {
          lfs.delete(p, true);
        }
      }
    }
  }

  public void testTrash() throws IOException {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
    trashShell(FileSystem.getLocal(conf), TEST_DIR);
  }

  public void testNonDefaultFS() throws IOException {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
    conf.set("fs.default.name", "invalid://host/bar/foo");
    trashNonDefaultFS(conf);
  }

  static class TestLFS extends LocalFileSystem {
    Path home;
    TestLFS() {
      this(TEST_DIR);
    }
    TestLFS(Path home) {
      super();
      this.home = home;
    }
    public Path getHomeDirectory() {
      return home;
    }
  }
}
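Editor's note: a minimal sketch (not part of this commit) of programmatic trash use as exercised by trashNonDefaultFS above; the target path is an illustrative assumption. With fs.trash.interval > 0, moveToTrash() renames the path into the per-user .Trash directory rather than deleting it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10"); // minutes between checkpoints
    FileSystem fs = FileSystem.get(conf);
    Trash trash = new Trash(fs, conf);
    // Assumes /tmp/obsolete exists on the configured FileSystem.
    boolean moved = trash.moveToTrash(new Path("/tmp/obsolete"));
    System.out.println("moved into " + trash.getCurrentTrashDir() + ": " + moved);
  }
}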
109 src/test/org/apache/hadoop/fs/TestTruncatedInputBug.java Normal file
@@ -0,0 +1,109 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.DataOutputStream;
import java.io.IOException;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;

/**
 * Test for the input truncation bug when mark/reset is used.
 * HADOOP-1489
 */
public class TestTruncatedInputBug extends TestCase {
  private static String TEST_ROOT_DIR =
    new Path(System.getProperty("test.build.data","/tmp"))
    .toString().replace(' ', '+');

  private void writeFile(FileSystem fileSys,
                         Path name, int nBytesToWrite)
      throws IOException {
    DataOutputStream out = fileSys.create(name);
    for (int i = 0; i < nBytesToWrite; ++i) {
      out.writeByte(0);
    }
    out.close();
  }

  /**
   * When mark() is used on BufferedInputStream, the request
   * size on the checksum file system can be small. However,
   * the checksum file system currently depends on the request size
   * being >= bytesPerSum to work properly.
   */
  public void testTruncatedInputBug() throws IOException {
    final int ioBufSize = 512;
    final int fileSize = ioBufSize*4;
    int filePos = 0;

    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", ioBufSize);
    FileSystem fileSys = FileSystem.getLocal(conf);

    try {
      // First create a test input file.
      Path testFile = new Path(TEST_ROOT_DIR, "HADOOP-1489");
      writeFile(fileSys, testFile, fileSize);
      assertTrue(fileSys.exists(testFile));
      assertTrue(fileSys.getFileStatus(testFile).getLen() == fileSize);

      // Now read the file for ioBufSize bytes
      FSDataInputStream in = fileSys.open(testFile, ioBufSize);
      // seek beyond data buffered by open
      filePos += ioBufSize * 2 + (ioBufSize - 10);
      in.seek(filePos);

      // read 4 more bytes before marking
      for (int i = 0; i < 4; ++i) {
        if (in.read() == -1) {
          break;
        }
        ++filePos;
      }

      // Now set mark() to trigger the bug
      // NOTE: in the fixed code, mark() does nothing (not supported) and
      // hence won't trigger this bug.
      in.mark(1);
      System.out.println("MARKED");

      // Try to read the rest
      while (filePos < fileSize) {
        if (in.read() == -1) {
          break;
        }
        ++filePos;
      }
      in.close();

      System.out.println("Read " + filePos + " bytes."
          + " file size=" + fileSize);
      assertTrue(filePos == fileSize);

    } finally {
      try {
        fileSys.close();
      } catch (Exception e) {
        // noop
      }
    }
  } // end testTruncatedInputBug
}
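Editor's note: a minimal sketch (not part of this commit) of the read pattern the regression test reproduces: opening through the checksum-backed local file system with an explicit buffer size, then seeking and doing single-byte reads. The file path is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BufferedOpenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", 512); // same size the test uses
    FileSystem fs = FileSystem.getLocal(conf);
    // Assumes /tmp/data.bin exists and is larger than the seek offset.
    FSDataInputStream in = fs.open(new Path("/tmp/data.bin"), 512);
    in.seek(1000);                   // land mid-buffer, past the prefix
    System.out.println(in.read());   // single-byte read through the buffer
    in.close();
  }
}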
150 src/test/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java Normal file
@@ -0,0 +1,150 @@
/**
 *
 * Licensed under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * @author: Sriram Rao (Kosmix Corp.)
 *
 * We need the ability to test the code in fs/kfs without really
 * having a KFS deployment. For this purpose, use the LocalFileSystem
 * as a way to "emulate" KFS.
 */

package org.apache.hadoop.fs.kfs;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Progressable;

public class KFSEmulationImpl implements IFSImpl {
  FileSystem localFS;

  public KFSEmulationImpl(Configuration conf) throws IOException {
    localFS = FileSystem.getLocal(conf);
  }

  public boolean exists(String path) throws IOException {
    return localFS.exists(new Path(path));
  }
  public boolean isDirectory(String path) throws IOException {
    return localFS.isDirectory(new Path(path));
  }
  public boolean isFile(String path) throws IOException {
    return localFS.isFile(new Path(path));
  }

  public String[] readdir(String path) throws IOException {
    FileStatus[] p = localFS.listStatus(new Path(path));
    String[] entries = null;

    if (p == null) {
      return null;
    }

    entries = new String[p.length];
    for (int i = 0; i < p.length; i++)
      entries[i] = p[i].getPath().toString();
    return entries;
  }

  public FileStatus[] readdirplus(Path path) throws IOException {
    return localFS.listStatus(path);
  }

  public int mkdirs(String path) throws IOException {
    if (localFS.mkdirs(new Path(path)))
      return 0;

    return -1;
  }

  public int rename(String source, String dest) throws IOException {
    if (localFS.rename(new Path(source), new Path(dest)))
      return 0;
    return -1;
  }

  public int rmdir(String path) throws IOException {
    if (isDirectory(path)) {
      // the directory better be empty
      String[] dirEntries = readdir(path);
      if ((dirEntries.length <= 2) && (localFS.delete(new Path(path), true)))
        return 0;
    }
    return -1;
  }

  public int remove(String path) throws IOException {
    if (isFile(path) && (localFS.delete(new Path(path), true)))
      return 0;
    return -1;
  }

  public long filesize(String path) throws IOException {
    return localFS.getFileStatus(new Path(path)).getLen();
  }
  public short getReplication(String path) throws IOException {
    return 1;
  }
  public short setReplication(String path, short replication) throws IOException {
    return 1;
  }
  public String[][] getDataLocation(String path, long start, long len) throws IOException {
    BlockLocation[] blkLocations =
      localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)),
          start, len);
    if ((blkLocations == null) || (blkLocations.length == 0)) {
      return new String[0][];
    }
    int blkCount = blkLocations.length;
    String[][] hints = new String[blkCount][];
    for (int i = 0; i < blkCount; i++) {
      hints[i] = blkLocations[i].getHosts();
    }
    return hints;
  }

  public long getModificationTime(String path) throws IOException {
    FileStatus s = localFS.getFileStatus(new Path(path));
    if (s == null)
      return 0;

    return s.getModificationTime();
  }

  public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
    // besides path/overwrite, the other args don't matter for
    // testing purposes.
    return localFS.append(new Path(path));
  }

  public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
    // besides path/overwrite, the other args don't matter for
    // testing purposes.
    return localFS.create(new Path(path));
  }

  public FSDataInputStream open(String path, int bufferSize) throws IOException {
    return localFS.open(new Path(path));
  }

};
204 src/test/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java Normal file
@@ -0,0 +1,204 @@
/**
 *
 * Licensed under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * @author: Sriram Rao (Kosmix Corp.)
 *
 * Unit tests for testing the KosmosFileSystem API implementation.
 */

package org.apache.hadoop.fs.kfs;

import java.io.*;
import java.net.*;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

import org.apache.hadoop.fs.kfs.KosmosFileSystem;

public class TestKosmosFileSystem extends TestCase {

  KosmosFileSystem kosmosFileSystem;
  KFSEmulationImpl kfsEmul;
  Path baseDir;

  @Override
  protected void setUp() throws IOException {
    Configuration conf = new Configuration();

    kfsEmul = new KFSEmulationImpl(conf);
    kosmosFileSystem = new KosmosFileSystem(kfsEmul);
    // a dummy URI; we are not connecting to any setup here
    kosmosFileSystem.initialize(URI.create("kfs:///"), conf);
    baseDir = new Path(System.getProperty("test.build.data", "/tmp") +
        "/kfs-test");
  }

  @Override
  protected void tearDown() throws Exception {

  }

  // @Test
  // Check all the directory API's in KFS
  public void testDirs() throws Exception {
    Path subDir1 = new Path("dir.1");

    // make the dir
    kosmosFileSystem.mkdirs(baseDir);
    assertTrue(kosmosFileSystem.isDirectory(baseDir));
    kosmosFileSystem.setWorkingDirectory(baseDir);

    kosmosFileSystem.mkdirs(subDir1);
    assertTrue(kosmosFileSystem.isDirectory(subDir1));

    assertFalse(kosmosFileSystem.exists(new Path("test1")));
    assertFalse(kosmosFileSystem.isDirectory(new Path("test/dir.2")));

    FileStatus[] p = kosmosFileSystem.listStatus(baseDir);
    assertEquals(p.length, 1);

    kosmosFileSystem.delete(baseDir, true);
    assertFalse(kosmosFileSystem.exists(baseDir));
  }

  // @Test
  // Check the file API's
  public void testFiles() throws Exception {
    Path subDir1 = new Path("dir.1");
    Path file1 = new Path("dir.1/foo.1");
    Path file2 = new Path("dir.1/foo.2");

    kosmosFileSystem.mkdirs(baseDir);
    assertTrue(kosmosFileSystem.isDirectory(baseDir));
    kosmosFileSystem.setWorkingDirectory(baseDir);

    kosmosFileSystem.mkdirs(subDir1);

    FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
    FSDataOutputStream s2 = kosmosFileSystem.create(file2, true, 4096, (short) 1, (long) 4096, null);

    s1.close();
    s2.close();

    FileStatus[] p = kosmosFileSystem.listStatus(subDir1);
    assertEquals(p.length, 2);

    kosmosFileSystem.delete(file1, true);
    p = kosmosFileSystem.listStatus(subDir1);
    assertEquals(p.length, 1);

    kosmosFileSystem.delete(file2, true);
    p = kosmosFileSystem.listStatus(subDir1);
    assertEquals(p.length, 0);

    kosmosFileSystem.delete(baseDir, true);
    assertFalse(kosmosFileSystem.exists(baseDir));
  }

  // @Test
  // Check file/read write
  public void testFileIO() throws Exception {
    Path subDir1 = new Path("dir.1");
    Path file1 = new Path("dir.1/foo.1");

    kosmosFileSystem.mkdirs(baseDir);
    assertTrue(kosmosFileSystem.isDirectory(baseDir));
    kosmosFileSystem.setWorkingDirectory(baseDir);

    kosmosFileSystem.mkdirs(subDir1);

    FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);

    int bufsz = 4096;
    byte[] data = new byte[bufsz];

    for (int i = 0; i < data.length; i++)
      data[i] = (byte) (i % 16);

    // write 4 bytes and read them back; read API should return a byte per call
    s1.write(32);
    s1.write(32);
    s1.write(32);
    s1.write(32);
    // write some data
    s1.write(data, 0, data.length);
    // flush out the changes
    s1.close();

    // Read the stuff back and verify it is correct
    FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096);
    int v;
    long nread = 0;

    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);

    assertEquals(s2.available(), data.length);

    byte[] buf = new byte[bufsz];
    s2.read(buf, 0, buf.length);
    nread = s2.getPos();

    for (int i = 0; i < data.length; i++)
      assertEquals(data[i], buf[i]);

    assertEquals(s2.available(), 0);

    s2.close();

    // append some data to the file
    try {
      s1 = kosmosFileSystem.append(file1);
      for (int i = 0; i < data.length; i++)
        data[i] = (byte) (i % 17);
      // write the data
      s1.write(data, 0, data.length);
      // flush out the changes
      s1.close();

      // read it back and validate
      s2 = kosmosFileSystem.open(file1, 4096);
      s2.seek(nread);
      s2.read(buf, 0, buf.length);
      for (int i = 0; i < data.length; i++)
        assertEquals(data[i], buf[i]);

      s2.close();
    } catch (Exception e) {
      System.out.println("append isn't supported by the underlying fs");
    }

    kosmosFileSystem.delete(file1, true);
    assertFalse(kosmosFileSystem.exists(file1));
    kosmosFileSystem.delete(subDir1, true);
    assertFalse(kosmosFileSystem.exists(subDir1));
    kosmosFileSystem.delete(baseDir, true);
    assertFalse(kosmosFileSystem.exists(baseDir));
  }

}
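Editor's note: a minimal sketch (not part of this commit) of the injection pattern the tests use: constructing KosmosFileSystem with an IFSImpl swaps the real KFS client for the LocalFileSystem-backed emulation, so no KFS deployment is needed. The sketch assumes it is compiled in org.apache.hadoop.fs.kfs, the same package as the tests; the directory name is illustrative.

package org.apache.hadoop.fs.kfs;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class KfsEmulationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Inject the LocalFileSystem-backed emulation in place of real KFS.
    KosmosFileSystem kfs = new KosmosFileSystem(new KFSEmulationImpl(conf));
    kfs.initialize(URI.create("kfs:///"), conf); // dummy URI, no server
    kfs.mkdirs(new Path("/tmp/kfs-sketch"));
    System.out.println(kfs.exists(new Path("/tmp/kfs-sketch")));
  }
}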
160 src/test/org/apache/hadoop/fs/loadGenerator/DataGenerator.java Normal file
@@ -0,0 +1,160 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.loadGenerator;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * This program reads the directory structure and file structure from
 * the input directory and creates the namespace in the file system
 * specified by the configuration in the specified root.
 * All the files are filled with 'a'.
 *
 * The synopsis of the command is
 * java DataGenerator
 *   -inDir <inDir>: input directory name where directory/file structures
 *                   are stored. Its default value is the current directory.
 *   -root <root>: the name of the root directory which the new namespace
 *                 is going to be placed under.
 *                 Its default value is "/testLoadSpace".
 */
public class DataGenerator extends Configured implements Tool {
  private File inDir = StructureGenerator.DEFAULT_STRUCTURE_DIRECTORY;
  private Path root = DEFAULT_ROOT;
  private FileSystem fs;
  final static private long BLOCK_SIZE = 10;
  final static private String USAGE = "java DataGenerator " +
      "-inDir <inDir> " +
      "-root <root>";

  /** Default name of the root under which the test namespace is placed */
  final static Path DEFAULT_ROOT = new Path("/testLoadSpace");

  /** Main function.
   * It first parses the command line arguments.
   * It then reads the directory structure from the input directory
   * structure file and creates the directory structure in the file system
   * namespace. Afterwards it reads the file attributes and creates files
   * in the file system. All file content is filled with 'a'.
   */
  public int run(String[] args) throws Exception {
    int exitCode = 0;
    exitCode = init(args);
    if (exitCode != 0) {
      return exitCode;
    }
    genDirStructure();
    genFiles();
    return exitCode;
  }

  /** Parse the command line arguments and initialize the data */
  private int init(String[] args) {
    try { // initialize file system handle
      fs = FileSystem.get(getConf());
    } catch (IOException ioe) {
      System.err.println("Cannot initialize the file system: " +
          ioe.getLocalizedMessage());
      return -1;
    }

    for (int i = 0; i < args.length; i++) { // parse command line
      if (args[i].equals("-root")) {
        root = new Path(args[++i]);
      } else if (args[i].equals("-inDir")) {
        inDir = new File(args[++i]);
      } else {
        System.err.println(USAGE);
        ToolRunner.printGenericCommandUsage(System.err);
        System.exit(-1);
      }
    }
    return 0;
  }

  /** Read the directory structure file under the input directory.
   * Create each directory under the specified root.
   * The directory names are relative to the specified root.
   */
  private void genDirStructure() throws IOException {
    BufferedReader in = new BufferedReader(
        new FileReader(new File(inDir,
            StructureGenerator.DIR_STRUCTURE_FILE_NAME)));
    String line;
    while ((line = in.readLine()) != null) {
      fs.mkdirs(new Path(root + line));
    }
    in.close(); // release the structure file handle
  }

  /** Read the file structure file under the input directory.
   * Create each file under the specified root.
   * The file names are relative to the root.
   */
  private void genFiles() throws IOException {
    BufferedReader in = new BufferedReader(
        new FileReader(new File(inDir,
            StructureGenerator.FILE_STRUCTURE_FILE_NAME)));
    String line;
    while ((line = in.readLine()) != null) {
      String[] tokens = line.split(" ");
      if (tokens.length != 2) {
        throw new IOException("Expect exactly 2 tokens per line: " + line);
      }
      String fileName = root + tokens[0];
      long fileSize = (long) (BLOCK_SIZE * Double.parseDouble(tokens[1]));
      genFile(new Path(fileName), fileSize);
    }
    in.close(); // release the structure file handle
  }

  /** Create a file with the name <code>file</code> and
   * a length of <code>fileSize</code>. The file is filled with character 'a'.
   */
  private void genFile(Path file, long fileSize) throws IOException {
    FSDataOutputStream out = fs.create(file, true,
        getConf().getInt("io.file.buffer.size", 4096),
        (short) getConf().getInt("dfs.replication", 3),
        fs.getDefaultBlockSize());
    for (long i = 0; i < fileSize; i++) {
      out.writeByte('a');
    }
    out.close();
  }

  /** Main program.
   *
   * @param args Command line arguments
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(),
        new DataGenerator(), args);
    System.exit(res);
  }
}
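For context (not part of the patch): DataGenerator replays the two structure files that StructureGenerator writes. A minimal sketch of a driver, assuming the structure files live in a hypothetical ./struct directory and an HDFS configuration is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class DataGeneratorDriver {
  public static void main(String[] args) throws Exception {
    // Replays ./struct/dirStructure and ./struct/fileStructure
    // into the default file system under /testLoadSpace.
    int res = ToolRunner.run(new Configuration(),
        new org.apache.hadoop.fs.loadGenerator.DataGenerator(),
        new String[] {"-inDir", "struct", "-root", "/testLoadSpace"});
    System.exit(res);
  }
}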
610 src/test/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java Normal file
@@ -0,0 +1,610 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.loadGenerator;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/** The load generator is a tool for testing NameNode behavior under
 * different client loads.
 * It allows the user to generate different mixes of read, write,
 * and list requests by specifying the probabilities of read and
 * write. The user controls the intensity of the load by
 * adjusting parameters for the number of worker threads and the delay
 * between operations. While load generators are running, the user
 * can profile and monitor the running of the NameNode. When a load
 * generator exits, it prints some NameNode statistics, like the average
 * execution time of each kind of operation and the NameNode
 * throughput.
 *
 * The user may either specify a constant duration and read and write
 * probabilities via the command line, or may specify a text file
 * that acts as a script of which read and write probabilities to
 * use for specified durations.
 *
 * The script takes the form of lines of duration in seconds, read
 * probability and write probability, each separated by white space.
 * Blank lines and lines starting with # (comments) are ignored.
 *
 * After command line argument parsing and data initialization,
 * the load generator spawns the number of worker threads
 * as specified by the user.
 * Each thread sends a stream of requests to the NameNode.
 * At each iteration, it first decides if it is going to read a file,
 * create a file, or list a directory, following the read and write
 * probabilities specified by the user.
 * When reading, it randomly picks a file in the test space and reads
 * the entire file. When writing, it randomly picks a directory in the
 * test space and creates a file whose name consists of the current
 * machine's host name and the thread id. The length of the file
 * follows a Gaussian distribution with an average size of 2 blocks and
 * a standard deviation of 1 block. The new file is filled with 'a'.
 * Immediately after the file creation completes, the file is deleted
 * from the test space.
 * When listing, it randomly picks a directory in the test space and
 * lists the directory content.
 * Between two consecutive operations, the thread pauses for a random
 * amount of time in the range of [0, maxDelayBetweenOps]
 * if the specified max delay is not zero.
 * All threads are stopped when the specified elapsed time has passed
 * in command-line execution, or when all the lines of the script have
 * been executed, if using a script.
 * Before exiting, the program prints the average execution time for
 * each kind of NameNode operation, and the number of requests
 * served by the NameNode.
 *
 * The synopsis of the command is
 * java LoadGenerator
 *   -readProbability <read probability>: read probability [0, 1]
 *                    with a default value of 0.3333.
 *   -writeProbability <write probability>: write probability [0, 1]
 *                    with a default value of 0.3333.
 *   -root <root>: test space with a default value of /testLoadSpace
 *   -maxDelayBetweenOps <maxDelayBetweenOpsInMillis>:
 *                    max delay in milliseconds between two operations,
 *                    with a default value of 0 indicating no delay.
 *   -numOfThreads <numOfThreads>:
 *                    number of threads to spawn, with a default value of 200.
 *   -elapsedTime <elapsedTimeInSecs>:
 *                    the elapsed time of the program, with a default value
 *                    of 0 indicating running forever.
 *   -startTime <startTimeInMillis>: when the threads start to run.
 *   -scriptFile <file name>: text file to parse for scripted operation
 */
public class LoadGenerator extends Configured implements Tool {
  public static final Log LOG = LogFactory.getLog(LoadGenerator.class);

  private volatile boolean shouldRun = true;
  private Path root = DataGenerator.DEFAULT_ROOT;
  private FileSystem fs;
  private int maxDelayBetweenOps = 0;
  private int numOfThreads = 200;
  private long[] durations = {0};
  private double[] readProbs = {0.3333};
  private double[] writeProbs = {0.3333};
  private volatile int currentIndex = 0;
  long totalTime = 0;
  private long startTime = System.currentTimeMillis() + 10000;
  final static private int BLOCK_SIZE = 10;
  private ArrayList<String> files = new ArrayList<String>(); // a table of file names
  private ArrayList<String> dirs = new ArrayList<String>(); // a table of directory names
  private Random r = null;
  final private static String USAGE = "java LoadGenerator\n" +
      "-readProbability <read probability>\n" +
      "-writeProbability <write probability>\n" +
      "-root <root>\n" +
      "-maxDelayBetweenOps <maxDelayBetweenOpsInMillis>\n" +
      "-numOfThreads <numOfThreads>\n" +
      "-elapsedTime <elapsedTimeInSecs>\n" +
      "-startTime <startTimeInMillis>\n" +
      "-scriptFile <filename>";
  final private String hostname;

  /** Constructor */
  public LoadGenerator() throws IOException, UnknownHostException {
    InetAddress addr = InetAddress.getLocalHost();
    hostname = addr.getHostName();
  }

  private final static int OPEN = 0;
  private final static int LIST = 1;
  private final static int CREATE = 2;
  private final static int WRITE_CLOSE = 3;
  private final static int DELETE = 4;
  private final static int TOTAL_OP_TYPES = 5;
  private long[] executionTime = new long[TOTAL_OP_TYPES];
  private long[] totalNumOfOps = new long[TOTAL_OP_TYPES];

  /** A thread sends a stream of requests to the NameNode.
   * At each iteration, it first decides if it is going to read a file,
   * create a file, or list a directory, following the read
   * and write probabilities.
   * When reading, it randomly picks a file in the test space and reads
   * the entire file. When writing, it randomly picks a directory in the
   * test space and creates a file whose name consists of the current
   * machine's host name and the thread id. The length of the file
   * follows a Gaussian distribution with an average size of 2 blocks and
   * a standard deviation of 1 block. The new file is filled with 'a'.
   * Immediately after the file creation completes, the file is deleted
   * from the test space.
   * When listing, it randomly picks a directory in the test space and
   * lists the directory content.
   * Between two consecutive operations, the thread pauses for a random
   * amount of time in the range of [0, maxDelayBetweenOps]
   * if the specified max delay is not zero.
   * A thread runs for the specified elapsed time if the time isn't zero.
   * Otherwise, it runs forever.
   */
  private class DFSClientThread extends Thread {
    private int id;
    private long[] executionTime = new long[TOTAL_OP_TYPES];
    private long[] totalNumOfOps = new long[TOTAL_OP_TYPES];
    private byte[] buffer = new byte[1024];

    private DFSClientThread(int id) {
      this.id = id;
    }

    /** Main loop.
     * Each iteration decides what the next operation is and then pauses.
     */
    public void run() {
      try {
        while (shouldRun) {
          nextOp();
          delay();
        }
      } catch (Exception ioe) {
        System.err.println(ioe.getLocalizedMessage());
        ioe.printStackTrace();
      }
    }

    /** Let the thread pause for a random amount of time in the range of
     * [0, maxDelayBetweenOps] if the delay is not zero. Otherwise, no pause.
     */
    private void delay() throws InterruptedException {
      if (maxDelayBetweenOps > 0) {
        int delay = r.nextInt(maxDelayBetweenOps);
        Thread.sleep(delay);
      }
    }

    /** Perform the next operation.
     *
     * Depending on the read and write probabilities, the next
     * operation could be either read, write, or list.
     */
    private void nextOp() throws IOException {
      double rn = r.nextDouble();
      int i = currentIndex;

      if (LOG.isDebugEnabled())
        LOG.debug("Thread " + this.id + " moving to index " + i);

      if (rn < readProbs[i]) {
        read();
      } else if (rn < readProbs[i] + writeProbs[i]) {
        write();
      } else {
        list();
      }
    }

    /** The read operation randomly picks a file in the test space and reads
     * the entire file */
    private void read() throws IOException {
      String fileName = files.get(r.nextInt(files.size()));
      long startTime = System.currentTimeMillis();
      InputStream in = fs.open(new Path(fileName));
      executionTime[OPEN] += (System.currentTimeMillis() - startTime);
      totalNumOfOps[OPEN]++;
      while (in.read(buffer) != -1) {}
      in.close();
    }

    /** The write operation randomly picks a directory in the
     * test space and creates a file whose name consists of the current
     * machine's host name and the thread id. The length of the file
     * follows a Gaussian distribution with an average size of 2 blocks and
     * a standard deviation of 1 block. The new file is filled with 'a'.
     * Immediately after the file creation completes, the file is deleted
     * from the test space.
     */
    private void write() throws IOException {
      String dirName = dirs.get(r.nextInt(dirs.size()));
      Path file = new Path(dirName, hostname + id);
      double fileSize = 0;
      while ((fileSize = r.nextGaussian() + 2) <= 0) {}
      genFile(file, (long) (fileSize * BLOCK_SIZE));
      long startTime = System.currentTimeMillis();
      fs.delete(file, true);
      executionTime[DELETE] += (System.currentTimeMillis() - startTime);
      totalNumOfOps[DELETE]++;
    }

    /** The list operation randomly picks a directory in the test space and
     * lists the directory content.
     */
    private void list() throws IOException {
      String dirName = dirs.get(r.nextInt(dirs.size()));
      long startTime = System.currentTimeMillis();
      fs.listStatus(new Path(dirName));
      executionTime[LIST] += (System.currentTimeMillis() - startTime);
      totalNumOfOps[LIST]++;
    }
  }

  /** Main function:
   * It first initializes data by parsing the command line arguments.
   * It then starts the number of DFSClient threads as specified by
   * the user.
   * It stops all the threads when the specified elapsed time has passed.
   * Before exiting, it prints the average execution time for
   * each operation and the operation throughput.
   */
  public int run(String[] args) throws Exception {
    int exitCode = init(args);
    if (exitCode != 0) {
      return exitCode;
    }

    barrier();

    DFSClientThread[] threads = new DFSClientThread[numOfThreads];
    for (int i = 0; i < numOfThreads; i++) {
      threads[i] = new DFSClientThread(i);
      threads[i].start();
    }

    if (durations[0] > 0) {
      while (shouldRun) {
        Thread.sleep(durations[currentIndex] * 1000);
        totalTime += durations[currentIndex];

        // Are we on the final line of the script?
        if ((currentIndex + 1) == durations.length) {
          shouldRun = false;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Moving to index " + currentIndex + ": r = "
                + readProbs[currentIndex] + ", w = " + writeProbs[currentIndex]
                + " for duration " + durations[currentIndex]);
          }
          currentIndex++;
        }
      }
    }

    LOG.debug("Done with testing.  Waiting for threads to finish.");
    for (DFSClientThread thread : threads) {
      thread.join();
      for (int i = 0; i < TOTAL_OP_TYPES; i++) {
        executionTime[i] += thread.executionTime[i];
        totalNumOfOps[i] += thread.totalNumOfOps[i];
      }
    }
    long totalOps = 0;
    for (int i = 0; i < TOTAL_OP_TYPES; i++) {
      totalOps += totalNumOfOps[i];
    }

    if (totalNumOfOps[OPEN] != 0) {
      System.out.println("Average open execution time: " +
          (double) executionTime[OPEN] / totalNumOfOps[OPEN] + "ms");
    }
    if (totalNumOfOps[LIST] != 0) {
      System.out.println("Average list execution time: " +
          (double) executionTime[LIST] / totalNumOfOps[LIST] + "ms");
    }
    if (totalNumOfOps[DELETE] != 0) {
      System.out.println("Average deletion execution time: " +
          (double) executionTime[DELETE] / totalNumOfOps[DELETE] + "ms");
      System.out.println("Average create execution time: " +
          (double) executionTime[CREATE] / totalNumOfOps[CREATE] + "ms");
      System.out.println("Average write_close execution time: " +
          (double) executionTime[WRITE_CLOSE] / totalNumOfOps[WRITE_CLOSE] + "ms");
    }
    if (durations[0] != 0) {
      System.out.println("Average operations per second: " +
          (double) totalOps / totalTime + " ops/s");
    }
    System.out.println();
    return exitCode;
  }

  /** Parse the command line arguments and initialize the data */
  private int init(String[] args) throws IOException {
    try {
      fs = FileSystem.get(getConf());
    } catch (IOException ioe) {
      System.err.println("Cannot initialize the file system: " +
          ioe.getLocalizedMessage());
      return -1;
    }
    int hostHashCode = hostname.hashCode();
    boolean scriptSpecified = false;

    try {
      for (int i = 0; i < args.length; i++) { // parse command line
        if (args[i].equals("-scriptFile")) {
          if (loadScriptFile(args[++i]) == -1)
            return -1;
          scriptSpecified = true;
        } else if (args[i].equals("-readProbability")) {
          if (scriptSpecified) {
            System.err.println("Can't specify probabilities and use script.");
            return -1;
          }
          readProbs[0] = Double.parseDouble(args[++i]);
          if (readProbs[0] < 0 || readProbs[0] > 1) {
            System.err.println(
                "The read probability must be in [0, 1]: " + readProbs[0]);
            return -1;
          }
        } else if (args[i].equals("-writeProbability")) {
          if (scriptSpecified) {
            System.err.println("Can't specify probabilities and use script.");
            return -1;
          }
          writeProbs[0] = Double.parseDouble(args[++i]);
          if (writeProbs[0] < 0 || writeProbs[0] > 1) {
            System.err.println(
                "The write probability must be in [0, 1]: " + writeProbs[0]);
            return -1;
          }
        } else if (args[i].equals("-root")) {
          root = new Path(args[++i]);
        } else if (args[i].equals("-maxDelayBetweenOps")) {
          maxDelayBetweenOps = Integer.parseInt(args[++i]); // in milliseconds
        } else if (args[i].equals("-numOfThreads")) {
          numOfThreads = Integer.parseInt(args[++i]);
          if (numOfThreads <= 0) {
            System.err.println(
                "Number of threads must be positive: " + numOfThreads);
            return -1;
          }
        } else if (args[i].equals("-startTime")) {
          startTime = Long.parseLong(args[++i]);
        } else if (args[i].equals("-elapsedTime")) {
          if (scriptSpecified) {
            System.err.println("Can't specify elapsedTime and use script.");
            return -1;
          }
          durations[0] = Long.parseLong(args[++i]);
        } else if (args[i].equals("-seed")) {
          r = new Random(Long.parseLong(args[++i]) + hostHashCode);
        } else {
          System.err.println(USAGE);
          ToolRunner.printGenericCommandUsage(System.err);
          return -1;
        }
      }
    } catch (NumberFormatException e) {
      System.err.println("Illegal parameter: " + e.getLocalizedMessage());
      System.err.println(USAGE);
      return -1;
    }

    for (int i = 0; i < readProbs.length; i++) {
      if (readProbs[i] + writeProbs[i] < 0 || readProbs[i] + writeProbs[i] > 1) {
        System.err.println(
            "The sum of read probability and write probability must be in [0, 1]: "
            + readProbs[i] + " " + writeProbs[i]);
        return -1;
      }
    }

    if (r == null) {
      r = new Random(System.currentTimeMillis() + hostHashCode);
    }

    return initFileDirTables();
  }

  /**
   * Read a script file of the form: lines of text with duration in seconds,
   * read probability and write probability, separated by white space.
   *
   * @param filename Script file
   * @return 0 if successful, -1 if not
   * @throws IOException if an error occurs reading the file
   */
  private int loadScriptFile(String filename) throws IOException {
    FileReader fr = new FileReader(new File(filename));
    BufferedReader br = new BufferedReader(fr);
    ArrayList<Long> duration = new ArrayList<Long>();
    ArrayList<Double> readProb = new ArrayList<Double>();
    ArrayList<Double> writeProb = new ArrayList<Double>();
    int lineNum = 0;

    String line;
    // Read script, parse values, build array of duration, read and write probs
    while ((line = br.readLine()) != null) {
      lineNum++;
      if (line.startsWith("#") || line.isEmpty()) // skip comments and blanks
        continue;

      String[] a = line.split("\\s");
      if (a.length != 3) {
        System.err.println("Line " + lineNum +
            ": Incorrect number of parameters: " + line);
        return -1; // parsing a[1] or a[2] below would fail anyway
      }

      try {
        long d = Long.parseLong(a[0]);
        if (d < 0) {
          System.err.println("Line " + lineNum + ": Invalid duration: " + d);
          return -1;
        }

        double r = Double.parseDouble(a[1]);
        if (r < 0.0 || r > 1.0) {
          System.err.println("Line " + lineNum +
              ": The read probability must be in [0, 1]: " + r);
          return -1;
        }

        double w = Double.parseDouble(a[2]);
        if (w < 0.0 || w > 1.0) {
          System.err.println("Line " + lineNum +
              ": The write probability must be in [0, 1]: " + w);
          return -1;
        }

        readProb.add(r);
        duration.add(d);
        writeProb.add(w);
      } catch (NumberFormatException nfe) {
        System.err.println(lineNum + ": Can't parse: " + line);
        return -1;
      }
    }

    br.close();
    fr.close();

    // Copy vectors to arrays of values, to avoid autoboxing overhead later
    durations = new long[duration.size()];
    readProbs = new double[readProb.size()];
    writeProbs = new double[writeProb.size()];

    for (int i = 0; i < durations.length; i++) {
      durations[i] = duration.get(i);
      readProbs[i] = readProb.get(i);
      writeProbs[i] = writeProb.get(i);
    }

    if (durations[0] == 0)
      System.err.println("Initial duration set to 0. " +
          "Will loop until stopped manually.");

    return 0;
  }

  /** Create a table that contains all directories under root and
   * another table that contains all files under root.
   */
  private int initFileDirTables() {
    try {
      initFileDirTables(root);
    } catch (IOException e) {
      System.err.println(e.getLocalizedMessage());
      e.printStackTrace();
      return -1;
    }
    if (dirs.isEmpty()) {
      System.err.println("The test space " + root + " is empty");
      return -1;
    }
    if (files.isEmpty()) {
      System.err.println("The test space " + root +
          " does not have any file");
      return -1;
    }
    return 0;
  }

  /** Create a table that contains all directories under the specified path and
   * another table that contains all files under the specified path and
   * whose name starts with "_file_".
   */
  private void initFileDirTables(Path path) throws IOException {
    FileStatus[] stats = fs.listStatus(path);
    if (stats != null) {
      for (FileStatus stat : stats) {
        if (stat.isDir()) {
          dirs.add(stat.getPath().toString());
          initFileDirTables(stat.getPath());
        } else {
          Path filePath = stat.getPath();
          if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) {
            files.add(filePath.toString());
          }
        }
      }
    }
  }

  /** Returns when the current number of seconds from the epoch equals
   * the command line argument given by <code>-startTime</code>.
   * This allows multiple instances of this program, running on clock
   * synchronized nodes, to start at roughly the same time.
   */
  private void barrier() {
    long sleepTime;
    while ((sleepTime = startTime - System.currentTimeMillis()) > 0) {
      try {
        Thread.sleep(sleepTime);
      } catch (InterruptedException ex) {
      }
    }
  }

  /** Create a file with a length of <code>fileSize</code>.
   * The file is filled with 'a'.
   */
  private void genFile(Path file, long fileSize) throws IOException {
    long startTime = System.currentTimeMillis();
    FSDataOutputStream out = fs.create(file, true,
        getConf().getInt("io.file.buffer.size", 4096),
        (short) getConf().getInt("dfs.replication", 3),
        fs.getDefaultBlockSize());
    executionTime[CREATE] += (System.currentTimeMillis() - startTime);
    totalNumOfOps[CREATE]++;

    for (long i = 0; i < fileSize; i++) {
      out.writeByte('a');
    }
    startTime = System.currentTimeMillis();
    out.close();
    executionTime[WRITE_CLOSE] += (System.currentTimeMillis() - startTime);
    totalNumOfOps[WRITE_CLOSE]++;
  }

  /** Main program
   *
   * @param args command line arguments
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(),
        new LoadGenerator(), args);
    System.exit(res);
  }

}
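For illustration (not part of the patch): a LoadGenerator script is plain text where each non-comment line reads "<durationSecs> <readProb> <writeProb>", and the leftover probability mass (1 - read - write) goes to list operations. The file name load.script below is an assumption. A minimal driver sketch:

// A hypothetical script file might contain:
//   # 60s at 30% reads / 30% writes / 40% lists, then 60s read-heavy
//   60 0.3 0.3
//   60 0.8 0.1
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class LoadGeneratorDriver {
  public static void main(String[] args) throws Exception {
    // Runs the scripted load with 16 worker threads.
    System.exit(ToolRunner.run(new Configuration(),
        new org.apache.hadoop.fs.loadGenerator.LoadGenerator(),
        new String[] {"-scriptFile", "load.script", "-numOfThreads", "16"}));
  }
}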
307 src/test/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java Normal file
@@ -0,0 +1,307 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.loadGenerator;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import org.apache.hadoop.util.ToolRunner;

/**
 * This program generates a random namespace structure with the following
 * constraints:
 * 1. The number of subdirectories is a random number in [minWidth, maxWidth].
 * 2. The maximum depth of each subdirectory is a random number in
 *    [2*maxDepth/3, maxDepth].
 * 3. Files are randomly placed in the empty directories. The size of each
 *    file follows a Gaussian distribution.
 * The generated namespace structure is described by two files in the output
 * directory. Each line of the first file
 * contains the full name of a leaf directory.
 * Each line of the second file contains
 * the full name of a file and its size, separated by a blank.
 *
 * The synopsis of the command is
 * java StructureGenerator
 *   -maxDepth <maxDepth>: maximum depth of the directory tree; default is 5.
 *   -minWidth <minWidth>: minimum number of subdirectories per directory;
 *                         default is 1.
 *   -maxWidth <maxWidth>: maximum number of subdirectories per directory;
 *                         default is 5.
 *   -numOfFiles <#OfFiles>: the total number of files; default is 10.
 *   -avgFileSize <avgFileSizeInBlocks>: average file size in blocks;
 *                         default is 1.
 *   -outDir <outDir>: output directory; default is the current directory.
 *   -seed <seed>: random number generator seed; default is the current time.
 */
public class StructureGenerator {
  private int maxDepth = 5;
  private int minWidth = 1;
  private int maxWidth = 5;
  private int numOfFiles = 10;
  private double avgFileSize = 1;
  private File outDir = DEFAULT_STRUCTURE_DIRECTORY;
  final static private String USAGE = "java StructureGenerator\n" +
      "-maxDepth <maxDepth>\n" +
      "-minWidth <minWidth>\n" +
      "-maxWidth <maxWidth>\n" +
      "-numOfFiles <#OfFiles>\n" +
      "-avgFileSize <avgFileSizeInBlocks>\n" +
      "-outDir <outDir>\n" +
      "-seed <seed>";

  private Random r = null;

  /** Default directory for storing the file/directory structure */
  final static File DEFAULT_STRUCTURE_DIRECTORY = new File(".");
  /** The name of the file for storing the directory structure */
  final static String DIR_STRUCTURE_FILE_NAME = "dirStructure";
  /** The name of the file for storing the file structure */
  final static String FILE_STRUCTURE_FILE_NAME = "fileStructure";
  /** The name prefix for the files created by this program */
  final static String FILE_NAME_PREFIX = "_file_";

  /**
   * The main function first parses the command line arguments,
   * then generates the in-memory directory structure and outputs it to
   * a file, and finally generates the in-memory files and outputs them
   * to a file.
   */
  public int run(String[] args) throws Exception {
    int exitCode = 0;
    exitCode = init(args);
    if (exitCode != 0) {
      return exitCode;
    }
    genDirStructure();
    output(new File(outDir, DIR_STRUCTURE_FILE_NAME));
    genFileStructure();
    outputFiles(new File(outDir, FILE_STRUCTURE_FILE_NAME));
    return exitCode;
  }

  /** Parse the command line arguments and initialize the data */
  private int init(String[] args) {
    try {
      for (int i = 0; i < args.length; i++) { // parse command line
        if (args[i].equals("-maxDepth")) {
          maxDepth = Integer.parseInt(args[++i]);
          if (maxDepth < 1) {
            System.err.println("maxDepth must be positive: " + maxDepth);
            return -1;
          }
        } else if (args[i].equals("-minWidth")) {
          minWidth = Integer.parseInt(args[++i]);
          if (minWidth < 0) {
            System.err.println("minWidth must be non-negative: " + minWidth);
            return -1;
          }
        } else if (args[i].equals("-maxWidth")) {
          maxWidth = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-numOfFiles")) {
          numOfFiles = Integer.parseInt(args[++i]);
          if (numOfFiles < 1) {
            System.err.println("NumOfFiles must be positive: " + numOfFiles);
            return -1;
          }
        } else if (args[i].equals("-avgFileSize")) {
          avgFileSize = Double.parseDouble(args[++i]);
          if (avgFileSize <= 0) {
            System.err.println("AvgFileSize must be positive: " + avgFileSize);
            return -1;
          }
        } else if (args[i].equals("-outDir")) {
          outDir = new File(args[++i]);
        } else if (args[i].equals("-seed")) {
          r = new Random(Long.parseLong(args[++i]));
        } else {
          System.err.println(USAGE);
          ToolRunner.printGenericCommandUsage(System.err);
          return -1;
        }
      }
    } catch (NumberFormatException e) {
      System.err.println("Illegal parameter: " + e.getLocalizedMessage());
      System.err.println(USAGE);
      return -1;
    }

    if (maxWidth < minWidth) {
      System.err.println("maxWidth must be at least minWidth: " +
          maxWidth + " < " + minWidth);
      return -1;
    }

    if (r == null) {
      r = new Random();
    }
    return 0;
  }

  /** In-memory representation of a directory */
  private static class INode {
    private String name;
    private List<INode> children = new ArrayList<INode>();

    /** Constructor */
    private INode(String name) {
      this.name = name;
    }

    /** Add a child (subdir/file) */
    private void addChild(INode child) {
      children.add(child);
    }

    /** Output the subtree rooted at the current node.
     * Only the leaves are printed.
     */
    private void output(PrintStream out, String prefix) {
      prefix = prefix == null ? name : prefix + "/" + name;
      if (children.isEmpty()) {
        out.println(prefix);
      } else {
        for (INode child : children) {
          child.output(out, prefix);
        }
      }
    }

    /** Output the files in the subtree rooted at this node */
    protected void outputFiles(PrintStream out, String prefix) {
      prefix = prefix == null ? name : prefix + "/" + name;
      for (INode child : children) {
        child.outputFiles(out, prefix);
      }
    }

    /** Add all the leaves in the subtree to the input list */
    private void getLeaves(List<INode> leaves) {
      if (children.isEmpty()) {
        leaves.add(this);
      } else {
        for (INode child : children) {
          child.getLeaves(leaves);
        }
      }
    }
  }

  /** In-memory representation of a file */
  private static class FileINode extends INode {
    private double numOfBlocks;

    /** Constructor */
    private FileINode(String name, double numOfBlocks) {
      super(name);
      this.numOfBlocks = numOfBlocks;
    }

    /** Output a file attribute */
    protected void outputFiles(PrintStream out, String prefix) {
      prefix = (prefix == null) ? super.name : prefix + "/" + super.name;
      out.println(prefix + " " + numOfBlocks);
    }
  }

  private INode root;

  /** Generates a directory tree with a max depth of <code>maxDepth</code> */
  private void genDirStructure() {
    root = genDirStructure("", maxDepth);
  }

  /** Generate a directory tree rooted at <code>rootName</code>.
   * The number of subtrees is in the range of [minWidth, maxWidth].
   * The maximum depth of each subtree is in the range of
   * [2*maxDepth/3, maxDepth].
   */
  private INode genDirStructure(String rootName, int maxDepth) {
    INode root = new INode(rootName);

    if (maxDepth > 0) {
      maxDepth--;
      int minDepth = maxDepth * 2 / 3;
      // Figure out the number of subdirectories to generate
      int numOfSubDirs = minWidth + r.nextInt(maxWidth - minWidth + 1);
      // Expand the tree
      for (int i = 0; i < numOfSubDirs; i++) {
        int childDepth = (maxDepth == 0) ? 0 :
            (r.nextInt(maxDepth - minDepth + 1) + minDepth);
        INode child = genDirStructure("dir" + i, childDepth);
        root.addChild(child);
      }
    }
    return root;
  }

  /** Collects leaf nodes in the tree */
  private List<INode> getLeaves() {
    List<INode> leaveDirs = new ArrayList<INode>();
    root.getLeaves(leaveDirs);
    return leaveDirs;
  }

  /** Decides where to place all the files and their lengths.
   * It first collects all empty directories in the tree.
   * For each file, it randomly chooses an empty directory to place the file.
   * The file's length is generated using a Gaussian distribution.
   */
  private void genFileStructure() {
    List<INode> leaves = getLeaves();
    int totalLeaves = leaves.size();
    for (int i = 0; i < numOfFiles; i++) {
      int leaveNum = r.nextInt(totalLeaves);
      double fileSize;
      do {
        fileSize = r.nextGaussian() + avgFileSize;
      } while (fileSize < 0);
      leaves.get(leaveNum).addChild(
          new FileINode(FILE_NAME_PREFIX + i, fileSize));
    }
  }

  /** Output the directory structure to a file; each line of the file
   * contains a directory name. Only empty directory names are printed. */
  private void output(File outFile) throws FileNotFoundException {
    System.out.println("Printing to " + outFile.toString());
    PrintStream out = new PrintStream(outFile);
    root.output(out, null);
    out.close();
  }

  /** Output all files' attributes to a file; each line of the output file
   * contains a file name and its length. */
  private void outputFiles(File outFile) throws FileNotFoundException {
    System.out.println("Printing to " + outFile.toString());
    PrintStream out = new PrintStream(outFile);
    root.outputFiles(out, null);
    out.close();
  }

  /**
   * Main program
   * @param args Command line arguments
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
    StructureGenerator sg = new StructureGenerator();
    System.exit(sg.run(args));
  }
}
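For illustration (not part of the patch): StructureGenerator writes the two structure files that DataGenerator and LoadGenerator consume; a fileStructure line looks like "/dir0/_file_0 1.27" (path, then size in blocks). The "struct" output directory and the parameter values below are assumptions. A minimal driver sketch:

public class StructureGeneratorDriver {
  public static void main(String[] args) throws Exception {
    // Writes dirStructure and fileStructure into ./struct; the empty root
    // name makes every emitted path start with '/'.
    System.exit(new org.apache.hadoop.fs.loadGenerator.StructureGenerator()
        .run(new String[] {"-maxDepth", "3", "-numOfFiles", "20",
                           "-outDir", "struct", "-seed", "42"}));
  }
}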
116 src/test/org/apache/hadoop/fs/permission/TestFsPermission.java Normal file
@@ -0,0 +1,116 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.permission;

import junit.framework.TestCase;

import static org.apache.hadoop.fs.permission.FsAction.*;

public class TestFsPermission extends TestCase {
  public void testFsAction() {
    // implies
    for (FsAction a : FsAction.values()) {
      assertTrue(ALL.implies(a));
    }
    for (FsAction a : FsAction.values()) {
      assertTrue(a == NONE ? NONE.implies(a) : !NONE.implies(a));
    }
    for (FsAction a : FsAction.values()) {
      assertTrue(a == READ_EXECUTE || a == READ || a == EXECUTE || a == NONE ?
          READ_EXECUTE.implies(a) : !READ_EXECUTE.implies(a));
    }

    // masks
    assertEquals(EXECUTE, EXECUTE.and(READ_EXECUTE));
    assertEquals(READ, READ.and(READ_EXECUTE));
    assertEquals(NONE, WRITE.and(READ_EXECUTE));

    assertEquals(READ, READ_EXECUTE.and(READ_WRITE));
    assertEquals(NONE, READ_EXECUTE.and(WRITE));
    assertEquals(WRITE_EXECUTE, ALL.and(WRITE_EXECUTE));
  }

  /**
   * Ensure that when manually specifying permission modes we get
   * the expected values back out for all combinations
   */
  public void testConvertingPermissions() {
    for (short s = 0; s < 01777; s++) {
      assertEquals(s, new FsPermission(s).toShort());
    }

    short s = 0;

    for (boolean sb : new boolean[] { false, true }) {
      for (FsAction u : FsAction.values()) {
        for (FsAction g : FsAction.values()) {
          for (FsAction o : FsAction.values()) {
            FsPermission f = new FsPermission(u, g, o, sb);
            assertEquals(s, f.toShort());
            s++;
          }
        }
      }
    }
  }

  public void testStickyBitToString() {
    // Check that every permission has its sticky bit represented correctly
    for (boolean sb : new boolean[] { false, true }) {
      for (FsAction u : FsAction.values()) {
        for (FsAction g : FsAction.values()) {
          for (FsAction o : FsAction.values()) {
            FsPermission f = new FsPermission(u, g, o, sb);
            if (f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
              assertEquals('t', f.toString().charAt(8));
            else if (f.getStickyBit() && !f.getOtherAction().implies(EXECUTE))
              assertEquals('T', f.toString().charAt(8));
            else if (!f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
              assertEquals('x', f.toString().charAt(8));
            else
              assertEquals('-', f.toString().charAt(8));
          }
        }
      }
    }
  }

  public void testFsPermission() {

    String symbolic = "-rwxrwxrwx";
    StringBuilder b = new StringBuilder("-123456789");

    for (int i = 0; i < (1 << 9); i++) {
      for (int j = 1; j < 10; j++) {
        b.setCharAt(j, '-');
      }
      String binary = Integer.toBinaryString(i);

      int len = binary.length();
      for (int j = 0; j < len; j++) {
        if (binary.charAt(j) == '1') {
          int k = 9 - (len - 1 - j);
          b.setCharAt(k, symbolic.charAt(k));
        }
      }

      assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
    }
  }

}
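A concrete instance of the short/symbolic round-trip the tests above exercise (a sketch; the octal values follow standard POSIX semantics, and the names below are assumptions for illustration):

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionExample {
  public static void main(String[] args) {
    // 0644 = rw-r--r--: owner READ_WRITE, group READ, other READ.
    FsPermission p = new FsPermission((short) 0644);
    System.out.println(p);                    // rw-r--r--
    System.out.println(p.toShort() == 0644);  // true
    // valueOf() expects the 10-character ls-style form with a type prefix,
    // which is why the test builds strings like "-rw-r--r--".
    System.out.println(FsPermission.valueOf("-rw-r--r--").toShort() == 0644);
  }
}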
185 src/test/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java Normal file
@@ -0,0 +1,185 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3.INode.FileType;

/**
 * A stub implementation of {@link FileSystemStore} for testing
 * {@link S3FileSystem} without actually connecting to S3.
 */
class InMemoryFileSystemStore implements FileSystemStore {

  private Configuration conf;
  private SortedMap<Path, INode> inodes = new TreeMap<Path, INode>();
  private Map<Long, byte[]> blocks = new HashMap<Long, byte[]>();

  public void initialize(URI uri, Configuration conf) {
    this.conf = conf;
  }

  public String getVersion() throws IOException {
    return "0";
  }

  public void deleteINode(Path path) throws IOException {
    inodes.remove(normalize(path));
  }

  public void deleteBlock(Block block) throws IOException {
    blocks.remove(block.getId());
  }

  public boolean inodeExists(Path path) throws IOException {
    return inodes.containsKey(normalize(path));
  }

  public boolean blockExists(long blockId) throws IOException {
    return blocks.containsKey(blockId);
  }

  public INode retrieveINode(Path path) throws IOException {
    return inodes.get(normalize(path));
  }

  public File retrieveBlock(Block block, long byteRangeStart) throws IOException {
    byte[] data = blocks.get(block.getId());
    File file = createTempFile();
    BufferedOutputStream out = null;
    try {
      out = new BufferedOutputStream(new FileOutputStream(file));
      out.write(data, (int) byteRangeStart, data.length - (int) byteRangeStart);
    } finally {
      if (out != null) {
        out.close();
      }
    }
    return file;
  }

  private File createTempFile() throws IOException {
    File dir = new File(conf.get("fs.s3.buffer.dir"));
    if (!dir.exists() && !dir.mkdirs()) {
      throw new IOException("Cannot create S3 buffer directory: " + dir);
    }
    File result = File.createTempFile("test-", ".tmp", dir);
    result.deleteOnExit();
    return result;
  }

  public Set<Path> listSubPaths(Path path) throws IOException {
    Path normalizedPath = normalize(path);
    // This is inefficient but more than adequate for testing purposes.
    Set<Path> subPaths = new LinkedHashSet<Path>();
    for (Path p : inodes.tailMap(normalizedPath).keySet()) {
      if (normalizedPath.equals(p.getParent())) {
        subPaths.add(p);
      }
    }
    return subPaths;
  }

  public Set<Path> listDeepSubPaths(Path path) throws IOException {
    Path normalizedPath = normalize(path);
    String pathString = normalizedPath.toUri().getPath();
    if (!pathString.endsWith("/")) {
      pathString += "/";
    }
    // This is inefficient but more than adequate for testing purposes.
    Set<Path> subPaths = new LinkedHashSet<Path>();
    for (Path p : inodes.tailMap(normalizedPath).keySet()) {
      if (p.toUri().getPath().startsWith(pathString)) {
        subPaths.add(p);
      }
    }
    return subPaths;
  }

  public void storeINode(Path path, INode inode) throws IOException {
    inodes.put(normalize(path), inode);
  }

  public void storeBlock(Block block, File file) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buf = new byte[8192];
    int numRead;
    BufferedInputStream in = null;
    try {
      in = new BufferedInputStream(new FileInputStream(file));
      while ((numRead = in.read(buf)) >= 0) {
        out.write(buf, 0, numRead);
      }
    } finally {
      if (in != null) {
        in.close();
      }
    }
    blocks.put(block.getId(), out.toByteArray());
  }

  private Path normalize(Path path) {
    if (!path.isAbsolute()) {
      throw new IllegalArgumentException("Path must be absolute: " + path);
    }
    return new Path(path.toUri().getPath());
  }

  public void purge() throws IOException {
    inodes.clear();
    blocks.clear();
  }

  public void dump() throws IOException {
    StringBuilder sb = new StringBuilder(getClass().getSimpleName());
    sb.append(", \n");
    for (Map.Entry<Path, INode> entry : inodes.entrySet()) {
      sb.append(entry.getKey()).append("\n");
      INode inode = entry.getValue();
      sb.append("\t").append(inode.getFileType()).append("\n");
      if (inode.getFileType() == FileType.DIRECTORY) {
        continue;
      }
      for (int j = 0; j < inode.getBlocks().length; j++) {
        sb.append("\t").append(inode.getBlocks()[j]).append("\n");
      }
    }
    System.out.println(sb);

    System.out.println(inodes.keySet());
    System.out.println(blocks.keySet());
  }

}
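A minimal sketch (not in the patch) of the FileSystemStore surface the stub fakes; it must live in the same package because the store and INode are package-private, and the configuration key usage is only needed if blocks are retrieved:

package org.apache.hadoop.fs.s3; // store and INode are package-private

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class InMemoryStoreExample {
  public static void main(String[] args) throws Exception {
    InMemoryFileSystemStore store = new InMemoryFileSystemStore();
    store.initialize(java.net.URI.create("s3://bucket"), new Configuration());
    store.storeINode(new Path("/dir"), INode.DIRECTORY_INODE);
    System.out.println(store.inodeExists(new Path("/dir"))); // true
    store.purge(); // clears inodes and blocks, as tearDown() does below
  }
}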
31 src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java Normal file
@@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3;

import java.io.IOException;

public class Jets3tS3FileSystemContractTest
    extends S3FileSystemContractBaseTest {

  @Override
  FileSystemStore getFileSystemStore() throws IOException {
    return new Jets3tFileSystemStore();
  }

}
48 src/test/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java Normal file
@@ -0,0 +1,48 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;

public abstract class S3FileSystemContractBaseTest
    extends FileSystemContractBaseTest {

  private FileSystemStore store;

  abstract FileSystemStore getFileSystemStore() throws IOException;

  @Override
  protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    store = getFileSystemStore();
    fs = new S3FileSystem(store);
    fs.initialize(URI.create(conf.get("test.fs.s3.name")), conf);
  }

  @Override
  protected void tearDown() throws Exception {
    store.purge();
    super.tearDown();
  }

}
60 src/test/org/apache/hadoop/fs/s3/TestINode.java Normal file
@@ -0,0 +1,60 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3;

import java.io.IOException;
import java.io.InputStream;

import junit.framework.TestCase;

import org.apache.hadoop.fs.s3.INode.FileType;

public class TestINode extends TestCase {

  public void testSerializeFileWithSingleBlock() throws IOException {
    Block[] blocks = { new Block(849282477840258181L, 128L) };
    INode inode = new INode(FileType.FILE, blocks);

    assertEquals("Length", 1L + 4 + 16, inode.getSerializedLength());
    InputStream in = inode.serialize();

    INode deserialized = INode.deserialize(in);

    assertEquals("FileType", inode.getFileType(), deserialized.getFileType());
    Block[] deserializedBlocks = deserialized.getBlocks();
    assertEquals("Length", 1, deserializedBlocks.length);
    assertEquals("Id", blocks[0].getId(), deserializedBlocks[0].getId());
    assertEquals("Length", blocks[0].getLength(), deserializedBlocks[0]
        .getLength());

  }

  public void testSerializeDirectory() throws IOException {
    INode inode = INode.DIRECTORY_INODE;
    assertEquals("Length", 1L, inode.getSerializedLength());
    InputStream in = inode.serialize();
    INode deserialized = INode.deserialize(in);
    assertSame(INode.DIRECTORY_INODE, deserialized);
  }

  public void testDeserializeNull() throws IOException {
    assertNull(INode.deserialize(null));
  }

}
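The 1L + 4 + 16 expectation above decodes the serialization layout that the two length assertions pin down: one byte for the file type (a directory stops there, hence 1L alone), a four-byte block count, then sixteen bytes per block. A sketch of the same arithmetic for the general case (field widths are inferred from the assertions, not taken from the INode source):

class INodeLengthSketch {
  static long expectedSerializedLength(int numBlocks) {
    final int FILE_TYPE = 1;     // one-byte type tag
    final int BLOCK_COUNT = 4;   // four-byte int block count
    final int PER_BLOCK = 8 + 8; // eight-byte id plus eight-byte length
    return FILE_TYPE + BLOCK_COUNT + (long) numBlocks * PER_BLOCK;
  }
  // expectedSerializedLength(1) == 21 == 1 + 4 + 16, matching the test.
}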
31 src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java Normal file
@@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3;

import java.io.IOException;

public class TestInMemoryS3FileSystemContract
    extends S3FileSystemContractBaseTest {

  @Override
  FileSystemStore getFileSystemStore() throws IOException {
    return new InMemoryFileSystemStore();
  }

}
36
src/test/org/apache/hadoop/fs/s3/TestS3Credentials.java
Normal file
@ -0,0 +1,36 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.s3;

import java.net.URI;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;

public class TestS3Credentials extends TestCase {
  public void testInvalidHostnameWithUnderscores() throws Exception {
    S3Credentials s3Credentials = new S3Credentials();
    try {
      s3Credentials.initialize(new URI("s3://a:b@c_d"), new Configuration());
      fail("Should throw IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      assertEquals("Invalid hostname in URI s3://a:b@c_d", e.getMessage());
    }
  }
}
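The expected message pins down where the rejection comes from: java.net.URI cannot parse an underscore as part of a hostname, so the host component comes back null. A hedged sketch of that mechanism (the class and method below are illustrative, not the actual S3Credentials code):

import java.net.URI;
import java.net.URISyntaxException;

// Illustrative only: URI.getHost() is null when the authority contains '_',
// which is the condition a check like S3Credentials' can key off.
class HostnameCheckSketch {
  static void requireValidHost(URI uri) {
    if (uri.getHost() == null) {
      throw new IllegalArgumentException("Invalid hostname in URI " + uri);
    }
  }

  public static void main(String[] args) throws URISyntaxException {
    requireValidHost(new URI("s3://a:b@c-d"));  // ok: '-' is legal in hostnames
    requireValidHost(new URI("s3://a:b@c_d"));  // throws: '_' is not
  }
}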
50
src/test/org/apache/hadoop/fs/s3/TestS3FileSystem.java
Normal file
@ -0,0 +1,50 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3;

import java.io.IOException;
import java.net.URI;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;

public class TestS3FileSystem extends TestCase {

  public void testInitialization() throws IOException {
    initializationTest("s3://a:b@c", "s3://a:b@c");
    initializationTest("s3://a:b@c/", "s3://a:b@c");
    initializationTest("s3://a:b@c/path", "s3://a:b@c");
    initializationTest("s3://a@c", "s3://a@c");
    initializationTest("s3://a@c/", "s3://a@c");
    initializationTest("s3://a@c/path", "s3://a@c");
    initializationTest("s3://c", "s3://c");
    initializationTest("s3://c/", "s3://c");
    initializationTest("s3://c/path", "s3://c");
  }

  private void initializationTest(String initializationUri, String expectedUri)
    throws IOException {

    S3FileSystem fs = new S3FileSystem(new InMemoryFileSystemStore());
    fs.initialize(URI.create(initializationUri), new Configuration());
    assertEquals(URI.create(expectedUri), fs.getUri());
  }

}
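All nine cases above reduce to one rule: the file system URI is canonicalized to scheme plus authority, with any path and trailing slash dropped. A minimal sketch of that normalization using only java.net.URI (the helper name is hypothetical):

import java.net.URI;

// Hypothetical restatement of the normalization the test table implies.
class UriCanonSketch {
  static URI canonical(URI uri) {
    // getAuthority() keeps user-info ("a:b@c"); the path is discarded.
    return URI.create(uri.getScheme() + "://" + uri.getAuthority());
  }

  public static void main(String[] args) {
    System.out.println(canonical(URI.create("s3://a:b@c/path")));  // s3://a:b@c
    System.out.println(canonical(URI.create("s3://c/")));          // s3://c
  }
}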
198
src/test/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
Normal file
@ -0,0 +1,198 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3native;

import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.Map.Entry;

import org.apache.hadoop.conf.Configuration;

/**
 * <p>
 * A stub implementation of {@link NativeFileSystemStore} for testing
 * {@link NativeS3FileSystem} without actually connecting to S3.
 * </p>
 */
class InMemoryNativeFileSystemStore implements NativeFileSystemStore {

  private Configuration conf;

  private SortedMap<String, FileMetadata> metadataMap =
    new TreeMap<String, FileMetadata>();
  private SortedMap<String, byte[]> dataMap = new TreeMap<String, byte[]>();

  public void initialize(URI uri, Configuration conf) throws IOException {
    this.conf = conf;
  }

  public void storeEmptyFile(String key) throws IOException {
    metadataMap.put(key, new FileMetadata(key, 0, System.currentTimeMillis()));
    dataMap.put(key, new byte[0]);
  }

  public void storeFile(String key, File file, byte[] md5Hash)
    throws IOException {

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buf = new byte[8192];
    int numRead;
    BufferedInputStream in = null;
    try {
      in = new BufferedInputStream(new FileInputStream(file));
      while ((numRead = in.read(buf)) >= 0) {
        out.write(buf, 0, numRead);
      }
    } finally {
      if (in != null) {
        in.close();
      }
    }
    metadataMap.put(key,
        new FileMetadata(key, file.length(), System.currentTimeMillis()));
    dataMap.put(key, out.toByteArray());
  }

  public InputStream retrieve(String key) throws IOException {
    return retrieve(key, 0);
  }

  public InputStream retrieve(String key, long byteRangeStart)
    throws IOException {

    byte[] data = dataMap.get(key);
    File file = createTempFile();
    BufferedOutputStream out = null;
    try {
      out = new BufferedOutputStream(new FileOutputStream(file));
      out.write(data, (int) byteRangeStart,
          data.length - (int) byteRangeStart);
    } finally {
      if (out != null) {
        out.close();
      }
    }
    return new FileInputStream(file);
  }

  private File createTempFile() throws IOException {
    File dir = new File(conf.get("fs.s3.buffer.dir"));
    if (!dir.exists() && !dir.mkdirs()) {
      throw new IOException("Cannot create S3 buffer directory: " + dir);
    }
    File result = File.createTempFile("test-", ".tmp", dir);
    result.deleteOnExit();
    return result;
  }

  public FileMetadata retrieveMetadata(String key) throws IOException {
    return metadataMap.get(key);
  }

  public PartialListing list(String prefix, int maxListingLength)
    throws IOException {
    return list(prefix, maxListingLength, null);
  }

  public PartialListing list(String prefix, int maxListingLength,
      String priorLastKey) throws IOException {

    return list(prefix, PATH_DELIMITER, maxListingLength, priorLastKey);
  }

  public PartialListing listAll(String prefix, int maxListingLength,
      String priorLastKey) throws IOException {

    return list(prefix, null, maxListingLength, priorLastKey);
  }

  private PartialListing list(String prefix, String delimiter,
      int maxListingLength, String priorLastKey) throws IOException {

    if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
      prefix += PATH_DELIMITER;
    }

    List<FileMetadata> metadata = new ArrayList<FileMetadata>();
    SortedSet<String> commonPrefixes = new TreeSet<String>();
    for (String key : dataMap.keySet()) {
      if (key.startsWith(prefix)) {
        if (delimiter == null) {
          metadata.add(retrieveMetadata(key));
        } else {
          int delimIndex = key.indexOf(delimiter, prefix.length());
          if (delimIndex == -1) {
            metadata.add(retrieveMetadata(key));
          } else {
            String commonPrefix = key.substring(0, delimIndex);
            commonPrefixes.add(commonPrefix);
          }
        }
      }
      if (metadata.size() + commonPrefixes.size() == maxListingLength) {
        // Return early once the listing is full; callers resume from this key.
        return new PartialListing(key, metadata.toArray(new FileMetadata[0]),
            commonPrefixes.toArray(new String[0]));
      }
    }
    return new PartialListing(null, metadata.toArray(new FileMetadata[0]),
        commonPrefixes.toArray(new String[0]));
  }

  public void delete(String key) throws IOException {
    metadataMap.remove(key);
    dataMap.remove(key);
  }

  public void rename(String srcKey, String dstKey) throws IOException {
    metadataMap.put(dstKey, metadataMap.remove(srcKey));
    dataMap.put(dstKey, dataMap.remove(srcKey));
  }

  public void purge(String prefix) throws IOException {
    Iterator<Entry<String, FileMetadata>> i =
      metadataMap.entrySet().iterator();
    while (i.hasNext()) {
      Entry<String, FileMetadata> entry = i.next();
      if (entry.getKey().startsWith(prefix)) {
        dataMap.remove(entry.getKey());
        i.remove();
      }
    }
  }

  public void dump() throws IOException {
    System.out.println(metadataMap.values());
    System.out.println(dataMap.keySet());
  }
}
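The private list() method above is the heart of this stub: it emulates S3's delimiter-based listing, where keys under the prefix either surface as files or contribute their next path segment as a "common prefix" (a synthetic directory). A standalone restatement of that grouping, with illustrative data:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

// Standalone demo of the delimiter grouping performed in list() above.
class ListingSketch {
  public static void main(String[] args) {
    SortedSet<String> keys = new TreeSet<String>(Arrays.asList(
        "dir/a", "dir/b/c", "dir/b/d", "other/e"));
    String prefix = "dir/";
    String delimiter = "/";
    List<String> files = new ArrayList<String>();
    SortedSet<String> commonPrefixes = new TreeSet<String>();
    for (String key : keys) {
      if (!key.startsWith(prefix)) {
        continue;
      }
      int delimIndex = key.indexOf(delimiter, prefix.length());
      if (delimIndex == -1) {
        files.add(key);                                    // a direct child
      } else {
        commonPrefixes.add(key.substring(0, delimIndex));  // a "directory"
      }
    }
    System.out.println(files);           // [dir/a]
    System.out.println(commonPrefixes);  // [dir/b]
  }
}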
30
src/test/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
Normal file
@ -0,0 +1,30 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3native;

import java.io.IOException;

public class Jets3tNativeS3FileSystemContractTest
  extends NativeS3FileSystemContractBaseTest {

  @Override
  NativeFileSystemStore getNativeFileSystemStore() throws IOException {
    return new Jets3tNativeFileSystemStore();
  }
}
59
src/test/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
Normal file
@ -0,0 +1,59 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3native;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;

public abstract class NativeS3FileSystemContractBaseTest
  extends FileSystemContractBaseTest {

  private NativeFileSystemStore store;

  abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;

  @Override
  protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    store = getNativeFileSystemStore();
    fs = new NativeS3FileSystem(store);
    fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
  }

  @Override
  protected void tearDown() throws Exception {
    store.purge("test");
    super.tearDown();
  }

  public void testListStatusForRoot() throws Exception {
    Path testDir = path("/test");
    assertTrue(fs.mkdirs(testDir));

    FileStatus[] paths = fs.listStatus(path("/"));
    assertEquals(1, paths.length);
    assertEquals(path("/test"), paths[0].getPath());
  }

}
30
src/test/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
Normal file
@ -0,0 +1,30 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.s3native;

import java.io.IOException;

public class TestInMemoryNativeS3FileSystemContract
  extends NativeS3FileSystemContractBaseTest {

  @Override
  NativeFileSystemStore getNativeFileSystemStore() throws IOException {
    return new InMemoryNativeFileSystemStore();
  }
}
139
src/test/org/apache/hadoop/http/TestGlobalFilter.java
Normal file
@ -0,0 +1,139 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.http;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.Set;
import java.util.TreeSet;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

public class TestGlobalFilter extends junit.framework.TestCase {
  static final Log LOG = LogFactory.getLog(HttpServer.class);
  static final Set<String> RECORDS = new TreeSet<String>();

  /** A very simple filter that records accessed URIs. */
  static public class RecordingFilter implements Filter {
    private FilterConfig filterConfig = null;

    public void init(FilterConfig filterConfig) {
      this.filterConfig = filterConfig;
    }

    public void destroy() {
      this.filterConfig = null;
    }

    public void doFilter(ServletRequest request, ServletResponse response,
        FilterChain chain) throws IOException, ServletException {
      if (filterConfig == null)
        return;

      String uri = ((HttpServletRequest)request).getRequestURI();
      LOG.info("filtering " + uri);
      RECORDS.add(uri);
      chain.doFilter(request, response);
    }

    /** Configuration for RecordingFilter */
    static public class Initializer extends FilterInitializer {
      public Initializer() {}

      void initFilter(FilterContainer container) {
        container.addGlobalFilter("recording", RecordingFilter.class.getName(), null);
      }
    }
  }

  /** Access a URL, ignoring some IOExceptions such as the page not existing. */
  static void access(String urlstring) throws IOException {
    LOG.warn("access " + urlstring);
    URL url = new URL(urlstring);
    URLConnection connection = url.openConnection();
    connection.connect();

    try {
      BufferedReader in = new BufferedReader(new InputStreamReader(
          connection.getInputStream()));
      try {
        for(; in.readLine() != null; );
      } finally {
        in.close();
      }
    } catch(IOException ioe) {
      LOG.warn("urlstring=" + urlstring, ioe);
    }
  }

  public void testServletFilter() throws Exception {
    Configuration conf = new Configuration();

    //start an http server with RecordingFilter
    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
        RecordingFilter.Initializer.class.getName());
    HttpServer http = new HttpServer("datanode", "localhost", 0, true, conf);
    http.start();

    final String fsckURL = "/fsck";
    final String stacksURL = "/stacks";
    final String ajspURL = "/a.jsp";
    final String listPathsURL = "/listPaths";
    final String dataURL = "/data";
    final String streamFile = "/streamFile";
    final String rootURL = "/";
    final String allURL = "/*";
    final String outURL = "/static/a.out";
    final String logURL = "/logs/a.log";

    final String[] urls = {fsckURL, stacksURL, ajspURL, listPathsURL,
        dataURL, streamFile, rootURL, allURL, outURL, logURL};

    //access the urls
    final String prefix = "http://localhost:" + http.getPort();
    try {
      for(int i = 0; i < urls.length; i++) {
        access(prefix + urls[i]);
      }
    } finally {
      http.stop();
    }

    LOG.info("RECORDS = " + RECORDS);

    //verify records
    for(int i = 0; i < urls.length; i++) {
      assertTrue(RECORDS.remove(urls[i]));
    }
    assertTrue(RECORDS.isEmpty());
  }
}
138
src/test/org/apache/hadoop/http/TestServletFilter.java
Normal file
@ -0,0 +1,138 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.http;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.Random;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

public class TestServletFilter extends junit.framework.TestCase {
  static final Log LOG = LogFactory.getLog(HttpServer.class);
  static volatile String uri = null;

  /** A very simple filter which records the URI it filtered. */
  static public class SimpleFilter implements Filter {
    private FilterConfig filterConfig = null;

    public void init(FilterConfig filterConfig) {
      this.filterConfig = filterConfig;
    }

    public void destroy() {
      this.filterConfig = null;
    }

    public void doFilter(ServletRequest request, ServletResponse response,
        FilterChain chain) throws IOException, ServletException {
      if (filterConfig == null)
        return;

      uri = ((HttpServletRequest)request).getRequestURI();
      LOG.info("filtering " + uri);
      chain.doFilter(request, response);
    }

    /** Configuration for the filter */
    static public class Initializer extends FilterInitializer {
      public Initializer() {}

      void initFilter(FilterContainer container) {
        container.addFilter("simple", SimpleFilter.class.getName(), null);
      }
    }
  }

  /** Access a URL, ignoring some IOExceptions such as the page not existing. */
  static void access(String urlstring) throws IOException {
    LOG.warn("access " + urlstring);
    URL url = new URL(urlstring);
    URLConnection connection = url.openConnection();
    connection.connect();

    try {
      BufferedReader in = new BufferedReader(new InputStreamReader(
          connection.getInputStream()));
      try {
        for(; in.readLine() != null; );
      } finally {
        in.close();
      }
    } catch(IOException ioe) {
      LOG.warn("urlstring=" + urlstring, ioe);
    }
  }

  public void testServletFilter() throws Exception {
    Configuration conf = new Configuration();

    //start an http server with SimpleFilter
    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
        SimpleFilter.Initializer.class.getName());
    HttpServer http = new HttpServer("datanode", "localhost", 0, true, conf);
    http.start();

    final String fsckURL = "/fsck";
    final String stacksURL = "/stacks";
    final String ajspURL = "/a.jsp";
    final String logURL = "/logs/a.log";
    final String hadooplogoURL = "/static/hadoop-logo.jpg";

    final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL};
    final Random ran = new Random();
    final int[] sequence = new int[50];

    //generate a random sequence and update counts
    for(int i = 0; i < sequence.length; i++) {
      sequence[i] = ran.nextInt(urls.length);
    }

    //access the urls as the sequence
    final String prefix = "http://localhost:" + http.getPort();
    try {
      for(int i = 0; i < sequence.length; i++) {
        access(prefix + urls[sequence[i]]);

        //make sure everything except fsck gets filtered
        if (sequence[i] == 0) {
          assertEquals(null, uri);
        } else {
          assertEquals(urls[sequence[i]], uri);
          uri = null;
        }
      }
    } finally {
      http.stop();
    }
  }
}
108
src/test/org/apache/hadoop/io/RandomDatum.java
Normal file
@ -0,0 +1,108 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.util.*;
import java.io.*;

public class RandomDatum implements WritableComparable {
  private int length;
  private byte[] data;

  public RandomDatum() {}

  public RandomDatum(Random random) {
    length = 10 + (int) Math.pow(10.0, random.nextFloat() * 3.0);
    data = new byte[length];
    random.nextBytes(data);
  }

  public int getLength() {
    return length;
  }

  public void write(DataOutput out) throws IOException {
    out.writeInt(length);
    out.write(data);
  }

  public void readFields(DataInput in) throws IOException {
    length = in.readInt();
    if (data == null || length > data.length)
      data = new byte[length];
    in.readFully(data, 0, length);
  }

  public int compareTo(Object o) {
    RandomDatum that = (RandomDatum)o;
    return WritableComparator.compareBytes(this.data, 0, this.length,
        that.data, 0, that.length);
  }

  public boolean equals(Object o) {
    return compareTo(o) == 0;
  }

  private static final char[] HEX_DIGITS =
    {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};

  /** Returns a string representation of this object. */
  public String toString() {
    StringBuffer buf = new StringBuffer(length*2);
    for (int i = 0; i < length; i++) {
      int b = data[i];
      buf.append(HEX_DIGITS[(b >> 4) & 0xf]);
      buf.append(HEX_DIGITS[b & 0xf]);
    }
    return buf.toString();
  }

  public static class Generator {
    Random random;

    private RandomDatum key;
    private RandomDatum value;

    public Generator() { random = new Random(); }
    public Generator(int seed) { random = new Random(seed); }

    public RandomDatum getKey() { return key; }
    public RandomDatum getValue() { return value; }

    public void next() {
      key = new RandomDatum(random);
      value = new RandomDatum(random);
    }
  }

  /** A WritableComparator optimized for RandomDatum. */
  public static class Comparator extends WritableComparator {
    public Comparator() {
      super(RandomDatum.class);
    }

    public int compare(byte[] b1, int s1, int l1,
                       byte[] b2, int s2, int l2) {
      int n1 = readInt(b1, s1);
      int n2 = readInt(b2, s2);
      return compareBytes(b1, s1+4, n1, b2, s2+4, n2);
    }
  }

}
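The raw-bytes Comparator at the bottom works because write() lays each record out as a four-byte length header followed by the payload, so the comparator can read the length at offset s and compare payloads starting at s + 4 without deserializing. A small demonstration of that wire layout (class name is illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Demo of the [4-byte length][payload] layout RandomDatum.write() produces.
class RandomDatumLayoutSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    byte[] payload = { 1, 2, 3 };
    out.writeInt(payload.length);
    out.write(payload);
    byte[] wire = bytes.toByteArray();
    System.out.println(wire.length);     // 7 = 4-byte header + 3-byte payload
    System.out.println(wire[3] & 0xff);  // 3: low byte of the big-endian length
  }
}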
155
src/test/org/apache/hadoop/io/TestArrayFile.java
Normal file
@ -0,0 +1,155 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.*;
import junit.framework.TestCase;

import org.apache.commons.logging.*;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;

/** Unit tests for ArrayFile, a flat file of binary key/value pairs. */
public class TestArrayFile extends TestCase {
  private static final Log LOG = LogFactory.getLog(TestArrayFile.class);
  private static String FILE =
    System.getProperty("test.build.data",".") + "/test.array";

  public TestArrayFile(String name) {
    super(name);
  }

  public void testArrayFile() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    RandomDatum[] data = generate(10000);
    writeTest(fs, data, FILE);
    readTest(fs, data, FILE, conf);
  }

  public void testEmptyFile() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    writeTest(fs, new RandomDatum[0], FILE);
    ArrayFile.Reader reader = new ArrayFile.Reader(fs, FILE, conf);
    assertNull(reader.get(0, new RandomDatum()));
    reader.close();
  }

  private static RandomDatum[] generate(int count) {
    LOG.debug("generating " + count + " records in debug");
    RandomDatum[] data = new RandomDatum[count];
    RandomDatum.Generator generator = new RandomDatum.Generator();
    for (int i = 0; i < count; i++) {
      generator.next();
      data[i] = generator.getValue();
    }
    return data;
  }

  private static void writeTest(FileSystem fs, RandomDatum[] data, String file)
    throws IOException {
    Configuration conf = new Configuration();
    MapFile.delete(fs, file);
    LOG.debug("creating with " + data.length + " debug");
    ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, file, RandomDatum.class);
    writer.setIndexInterval(100);
    for (int i = 0; i < data.length; i++)
      writer.append(data[i]);
    writer.close();
  }

  private static void readTest(FileSystem fs, RandomDatum[] data, String file, Configuration conf)
    throws IOException {
    RandomDatum v = new RandomDatum();
    LOG.debug("reading " + data.length + " debug");
    ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
    for (int i = 0; i < data.length; i++) {       // try forwards
      reader.get(i, v);
      if (!v.equals(data[i])) {
        throw new RuntimeException("wrong value at " + i);
      }
    }
    for (int i = data.length-1; i >= 0; i--) {    // then backwards
      reader.get(i, v);
      if (!v.equals(data[i])) {
        throw new RuntimeException("wrong value at " + i);
      }
    }
    reader.close();
    LOG.debug("done reading " + data.length + " debug");
  }


  /** For debugging and testing. */
  public static void main(String[] args) throws Exception {
    int count = 1024 * 1024;
    boolean create = true;
    boolean check = true;
    String file = FILE;
    String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file";

    if (args.length == 0) {
      System.err.println(usage);
      System.exit(-1);
    }

    Configuration conf = new Configuration();
    int i = 0;
    Path fpath = null;
    FileSystem fs = null;
    try {
      for (; i < args.length; i++) {    // parse command line
        if (args[i] == null) {
          continue;
        } else if (args[i].equals("-count")) {
          count = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-nocreate")) {
          create = false;
        } else if (args[i].equals("-nocheck")) {
          check = false;
        } else {
          // file is a required parameter
          file = args[i];
          fpath = new Path(file);
        }
      }

      fs = fpath.getFileSystem(conf);

      LOG.info("count = " + count);
      LOG.info("create = " + create);
      LOG.info("check = " + check);
      LOG.info("file = " + file);

      RandomDatum[] data = generate(count);

      if (create) {
        writeTest(fs, data, file);
      }

      if (check) {
        readTest(fs, data, file, conf);
      }
    } finally {
      fs.close();
    }
  }
}
64
src/test/org/apache/hadoop/io/TestArrayWritable.java
Normal file
@ -0,0 +1,64 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.*;

import junit.framework.TestCase;

/** Unit tests for ArrayWritable */
public class TestArrayWritable extends TestCase {

  static class TextArrayWritable extends ArrayWritable {
    public TextArrayWritable() {
      super(Text.class);
    }
  }

  public TestArrayWritable(String name) {
    super(name);
  }

  /**
   * If valueClass is undefined, readFields should throw an exception
   * indicating that the field is null. Otherwise, readFields should succeed.
   */
  public void testThrowUndefinedValueException() throws IOException {
    // Get a buffer containing a simple text array
    Text[] elements = {new Text("zero"), new Text("one"), new Text("two")};
    TextArrayWritable sourceArray = new TextArrayWritable();
    sourceArray.set(elements);

    // Write it to a normal output buffer
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    sourceArray.write(out);

    // Read the output buffer with TextReadable. Since the valueClass is
    // defined, this should succeed
    TextArrayWritable destArray = new TextArrayWritable();
    in.reset(out.getData(), out.getLength());
    destArray.readFields(in);
    Writable[] destElements = destArray.get();
    assertTrue(destElements.length == elements.length);
    for (int i = 0; i < elements.length; i++) {
      assertEquals(destElements[i], elements[i]);
    }
  }
}
70
src/test/org/apache/hadoop/io/TestBloomMapFile.java
Normal file
@ -0,0 +1,70 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import junit.framework.TestCase;

public class TestBloomMapFile extends TestCase {
  private static Configuration conf = new Configuration();

  public void testMembershipTest() throws Exception {
    // write the file
    Path dirName = new Path(System.getProperty("test.build.data",".") +
        getName() + ".bloommapfile");
    FileSystem fs = FileSystem.getLocal(conf);
    Path qualifiedDirName = fs.makeQualified(dirName);
    conf.setInt("io.mapfile.bloom.size", 2048);
    BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
        qualifiedDirName.toString(), IntWritable.class, Text.class);
    IntWritable key = new IntWritable();
    Text value = new Text();
    for (int i = 0; i < 2000; i += 2) {
      key.set(i);
      value.set("00" + i);
      writer.append(key, value);
    }
    writer.close();

    BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
        qualifiedDirName.toString(), conf);
    // check the false-positive rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
      key.set(i);
      boolean exists = reader.probablyHasKey(key);
      if (i % 2 == 0) {
        if (!exists) falseNeg++;
      } else {
        if (exists) falsePos++;
      }
    }
    reader.close();
    fs.delete(qualifiedDirName, true);
    System.out.println("False negatives: " + falseNeg);
    assertEquals(0, falseNeg);
    System.out.println("False positives: " + falsePos);
    assertTrue(falsePos < 2);
  }

}
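The asymmetric assertions fit a Bloom filter's guarantees: it can never report a false negative, so falseNeg must be exactly zero, while a handful of false positives is expected. For context, the standard false-positive estimate is (1 - e^(-kn/m))^k; the sizing below is purely illustrative and is not derived from the io.mapfile.bloom.size setting above:

// Back-of-the-envelope Bloom false-positive estimate; parameters illustrative.
class BloomMathSketch {
  public static void main(String[] args) {
    double n = 1000;   // keys inserted (every even i in [0, 2000))
    double m = 16384;  // filter bits (illustrative, not from the config key)
    double k = 5;      // hash functions (illustrative)
    double p = Math.pow(1 - Math.exp(-k * n / m), k);
    System.out.printf("estimated false-positive rate: %.4f%n", p);
  }
}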
95
src/test/org/apache/hadoop/io/TestBytesWritable.java
Normal file
@ -0,0 +1,95 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io;

import junit.framework.TestCase;

/**
 * This is the unit test for BytesWritable.
 */
public class TestBytesWritable extends TestCase {

  public void testSizeChange() throws Exception {
    byte[] hadoop = "hadoop".getBytes();
    BytesWritable buf = new BytesWritable(hadoop);
    int size = buf.getLength();
    int orig_capacity = buf.getCapacity();
    buf.setSize(size*2);
    int new_capacity = buf.getCapacity();
    System.arraycopy(buf.getBytes(), 0, buf.getBytes(), size, size);
    assertTrue(new_capacity >= size * 2);
    assertEquals(size * 2, buf.getLength());
    assertTrue(new_capacity != orig_capacity);
    buf.setSize(size*4);
    assertTrue(new_capacity != buf.getCapacity());
    for(int i=0; i < size*2; ++i) {
      assertEquals(hadoop[i%size], buf.getBytes()[i]);
    }
    // shrink the buffer
    buf.setCapacity(1);
    // make sure the size has been cut down too
    assertEquals(1, buf.getLength());
    // but that the data is still there
    assertEquals(hadoop[0], buf.getBytes()[0]);
  }

  public void testHash() throws Exception {
    byte[] owen = "owen".getBytes();
    BytesWritable buf = new BytesWritable(owen);
    assertEquals(4347922, buf.hashCode());
    buf.setCapacity(10000);
    assertEquals(4347922, buf.hashCode());
    buf.setSize(0);
    assertEquals(1, buf.hashCode());
  }

  public void testCompare() throws Exception {
    byte[][] values = new byte[][]{"abc".getBytes(),
                                   "ad".getBytes(),
                                   "abcd".getBytes(),
                                   "".getBytes(),
                                   "b".getBytes()};
    BytesWritable[] buf = new BytesWritable[values.length];
    for(int i=0; i < values.length; ++i) {
      buf[i] = new BytesWritable(values[i]);
    }
    // check to make sure the compare function is symmetric and reflexive
    for(int i=0; i < values.length; ++i) {
      for(int j=0; j < values.length; ++j) {
        assertTrue(buf[i].compareTo(buf[j]) == -buf[j].compareTo(buf[i]));
        assertTrue((i == j) == (buf[i].compareTo(buf[j]) == 0));
      }
    }
    assertTrue(buf[0].compareTo(buf[1]) < 0);
    assertTrue(buf[1].compareTo(buf[2]) > 0);
    assertTrue(buf[2].compareTo(buf[3]) > 0);
    assertTrue(buf[3].compareTo(buf[4]) < 0);
  }

  private void checkToString(byte[] input, String expected) {
    String actual = new BytesWritable(input).toString();
    assertEquals(expected, actual);
  }

  public void testToString() {
    checkToString(new byte[]{0,1,2,0x10}, "00 01 02 10");
    checkToString(new byte[]{-0x80, -0x7f, -0x1, -0x2, 1, 0},
                  "80 81 ff fe 01 00");
  }
}
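The magic numbers in testHash are reproducible: BytesWritable hashes only the first getLength() bytes with a 31-based rolling hash seeded at 1, which is why growing the capacity leaves the hash untouched and an empty buffer hashes to 1. A quick re-derivation of both asserted constants (class name is illustrative):

// Re-derives the constants asserted in testHash above.
class BytesHashSketch {
  static int hashBytes(byte[] bytes, int length) {
    int hash = 1;
    for (int i = 0; i < length; i++) {
      hash = 31 * hash + (int) bytes[i];
    }
    return hash;
  }

  public static void main(String[] args) {
    byte[] owen = "owen".getBytes();
    System.out.println(hashBytes(owen, owen.length));  // 4347922
    System.out.println(hashBytes(new byte[0], 0));     // 1
  }
}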
113
src/test/org/apache/hadoop/io/TestDefaultStringifier.java
Normal file
@ -0,0 +1,113 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.IOException;
import java.util.Random;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

public class TestDefaultStringifier extends TestCase {

  private static Configuration conf = new Configuration();
  private static final Log LOG = LogFactory.getLog(TestDefaultStringifier.class);

  private char[] alphabet = "abcdefghijklmnopqrstuvwxyz".toCharArray();

  public void testWithWritable() throws Exception {

    conf.set("io.serializations", "org.apache.hadoop.io.serializer.WritableSerialization");

    LOG.info("Testing DefaultStringifier with Text");

    Random random = new Random();

    //test with a Text
    for(int i=0;i<10;i++) {
      //generate a random string
      StringBuilder builder = new StringBuilder();
      int strLen = random.nextInt(40);
      for(int j=0; j< strLen; j++) {
        builder.append(alphabet[random.nextInt(alphabet.length)]);
      }
      Text text = new Text(builder.toString());
      DefaultStringifier<Text> stringifier = new DefaultStringifier<Text>(conf, Text.class);

      String str = stringifier.toString(text);
      Text claimedText = stringifier.fromString(str);
      LOG.info("Object: " + text);
      LOG.info("String representation of the object: " + str);
      assertEquals(text, claimedText);
    }
  }

  public void testWithJavaSerialization() throws Exception {
    conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");

    LOG.info("Testing DefaultStringifier with Serializable Integer");

    //Integer implements Serializable
    Integer testInt = Integer.valueOf(42);
    DefaultStringifier<Integer> stringifier = new DefaultStringifier<Integer>(conf, Integer.class);

    String str = stringifier.toString(testInt);
    Integer claimedInt = stringifier.fromString(str);
    LOG.info("String representation of the object: " + str);

    assertEquals(testInt, claimedInt);
  }

  public void testStoreLoad() throws IOException {

    LOG.info("Testing DefaultStringifier#store() and #load()");
    conf.set("io.serializations", "org.apache.hadoop.io.serializer.WritableSerialization");
    Text text = new Text("uninteresting test string");
    String keyName = "test.defaultstringifier.key1";

    DefaultStringifier.store(conf, text, keyName);

    Text claimedText = DefaultStringifier.load(conf, keyName, Text.class);
    assertEquals("DefaultStringifier#load() or #store() might be flawed",
        text, claimedText);
  }

  public void testStoreLoadArray() throws IOException {
    LOG.info("Testing DefaultStringifier#storeArray() and #loadArray()");
    conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");

    String keyName = "test.defaultstringifier.key2";

    Integer[] array = new Integer[] {1,2,3,4,5};

    DefaultStringifier.storeArray(conf, array, keyName);

    Integer[] claimedArray = DefaultStringifier.<Integer>loadArray(conf, keyName, Integer.class);
    for (int i = 0; i < array.length; i++) {
      assertEquals("two arrays are not equal", array[i], claimedArray[i]);
    }
  }

}
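All four tests share one shape: serialize the object with whatever serializer io.serializations selects, encode the bytes printably so they can live in a Configuration value, and reverse both steps on the way back. A minimal sketch of that round trip; the use of Base64 here is an assumption for illustration, not a claim about DefaultStringifier's actual encoding:

import java.io.UnsupportedEncodingException;
import java.util.Base64;

// Sketch of a serialize-then-encode round trip (Base64 assumed for illustration).
class StringifierSketch {
  static String toString(byte[] serialized) {
    return Base64.getEncoder().encodeToString(serialized);
  }

  static byte[] fromString(String str) {
    return Base64.getDecoder().decode(str);
  }

  public static void main(String[] args) throws UnsupportedEncodingException {
    byte[] data = "uninteresting test string".getBytes("UTF-8");
    System.out.println(new String(fromString(toString(data)), "UTF-8"));
  }
}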
103
src/test/org/apache/hadoop/io/TestEnumSetWritable.java
Normal file
@ -0,0 +1,103 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.IOException;
import java.util.EnumSet;

import junit.framework.TestCase;

/** Unit test for EnumSetWritable */
public class TestEnumSetWritable extends TestCase {

  enum TestEnumSet {
    CREATE, OVERWRITE, APPEND;
  }

  EnumSet<TestEnumSet> nonEmptyFlag = EnumSet.of(TestEnumSet.APPEND);
  EnumSetWritable<TestEnumSet> nonEmptyFlagWritable = new EnumSetWritable<TestEnumSet>(
      nonEmptyFlag);

  @SuppressWarnings("unchecked")
  public void testSerializeAndDeserializeNonEmpty() throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, nonEmptyFlagWritable, nonEmptyFlagWritable
        .getClass(), null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
        .readObject(in, null)).get();
    assertEquals(read, nonEmptyFlag);
  }

  EnumSet<TestEnumSet> emptyFlag = EnumSet.noneOf(TestEnumSet.class);

  @SuppressWarnings("unchecked")
  public void testSerializeAndDeserializeEmpty() throws IOException {

    boolean gotException = false;
    try {
      new EnumSetWritable<TestEnumSet>(emptyFlag);
    } catch (RuntimeException e) {
      gotException = true;
    }

    assertTrue(
        "Instantiating an empty EnumSetWritable with no element type class provided should throw an exception.",
        gotException);

    EnumSetWritable<TestEnumSet> emptyFlagWritable = new EnumSetWritable<TestEnumSet>(
        emptyFlag, TestEnumSet.class);
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable
        .getClass(), null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
        .readObject(in, null)).get();
    assertEquals(read, emptyFlag);
  }

  @SuppressWarnings("unchecked")
  public void testSerializeAndDeserializeNull() throws IOException {

    boolean gotException = false;
    try {
      new EnumSetWritable<TestEnumSet>(null);
    } catch (RuntimeException e) {
      gotException = true;
    }

    assertTrue(
        "Instantiating a null EnumSetWritable with no element type class provided should throw an exception.",
        gotException);

    EnumSetWritable<TestEnumSet> nullFlagWritable = new EnumSetWritable<TestEnumSet>(
        null, TestEnumSet.class);

    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable
        .getClass(), null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
        .readObject(in, null)).get();
    assertEquals(read, null);
  }
}
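The empty and null cases fail for the same underlying reason: with no elements to inspect, there is nothing to recover the enum's element type from at read time, so the writable insists on being told the class explicitly. A hypothetical illustration of the problem (not the EnumSetWritable code itself):

import java.util.EnumSet;
import java.util.Iterator;

// Hypothetical: a serializer that infers the element type from the first
// element has nothing to go on for an empty set.
class EnumTypeSketch {
  enum Op { CREATE, OVERWRITE, APPEND }

  static <E extends Enum<E>> Class<?> elementType(EnumSet<E> set) {
    Iterator<E> it = set.iterator();
    if (!it.hasNext()) {
      throw new RuntimeException("element class must be provided for an empty EnumSet");
    }
    return it.next().getDeclaringClass();
  }

  public static void main(String[] args) {
    System.out.println(elementType(EnumSet.of(Op.APPEND)));     // prints the enum class
    System.out.println(elementType(EnumSet.noneOf(Op.class)));  // throws
  }
}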
178
src/test/org/apache/hadoop/io/TestGenericWritable.java
Normal file
@ -0,0 +1,178 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

/**
 * TestCase for {@link GenericWritable} class.
 * @see TestWritable#testWritable(Writable)
 */
public class TestGenericWritable extends TestCase {

  private Configuration conf;
  public static final String CONF_TEST_KEY = "test.generic.writable";
  public static final String CONF_TEST_VALUE = "dummy";

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    conf = new Configuration();
    // set the configuration parameter
    conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
  }

  /** Dummy class for testing {@link GenericWritable} */
  public static class Foo implements Writable {
    private String foo = "foo";
    public void readFields(DataInput in) throws IOException {
      foo = Text.readString(in);
    }
    public void write(DataOutput out) throws IOException {
      Text.writeString(out, foo);
    }
    @Override
    public boolean equals(Object obj) {
      if (!(obj instanceof Foo))
        return false;
      return this.foo.equals(((Foo)obj).foo);
    }
  }

  /** Dummy class for testing {@link GenericWritable} */
  public static class Bar implements Writable, Configurable {
    private int bar = 42; // The Answer to The Ultimate Question Of Life, the Universe and Everything
    private Configuration conf = null;
    public void readFields(DataInput in) throws IOException {
      bar = in.readInt();
    }
    public void write(DataOutput out) throws IOException {
      out.writeInt(bar);
    }
    public Configuration getConf() {
      return conf;
    }
    public void setConf(Configuration conf) {
      this.conf = conf;
    }
    @Override
    public boolean equals(Object obj) {
      if (!(obj instanceof Bar))
        return false;
      return this.bar == ((Bar)obj).bar;
    }
  }

  /** Dummy class for testing {@link GenericWritable} */
  public static class Baz extends Bar {
    @Override
    public void readFields(DataInput in) throws IOException {
      super.readFields(in);
      // needs a configuration parameter
      assertEquals("Configuration is not set for the wrapped object",
                   CONF_TEST_VALUE, getConf().get(CONF_TEST_KEY));
    }
    @Override
    public void write(DataOutput out) throws IOException {
      super.write(out);
    }
  }

  /** Dummy class for testing {@link GenericWritable} */
  public static class FooGenericWritable extends GenericWritable {
    @Override
    @SuppressWarnings("unchecked")
    protected Class<? extends Writable>[] getTypes() {
      return new Class[] {Foo.class, Bar.class, Baz.class};
    }
    @Override
    public boolean equals(Object obj) {
      if (!(obj instanceof FooGenericWritable))
        return false;
      return get().equals(((FooGenericWritable)obj).get());
    }
  }

  public void testFooWritable() throws Exception {
    System.out.println("Testing Writable wrapped in GenericWritable");
    FooGenericWritable generic = new FooGenericWritable();
    generic.setConf(conf);
    Foo foo = new Foo();
    generic.set(foo);
    TestWritable.testWritable(generic);
  }

  public void testBarWritable() throws Exception {
    System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
    FooGenericWritable generic = new FooGenericWritable();
    generic.setConf(conf);
    Bar bar = new Bar();
    bar.setConf(conf);
    generic.set(bar);

    // test writing generic writable
    FooGenericWritable after
      = (FooGenericWritable)TestWritable.testWritable(generic, conf);

    // test configuration
    System.out.println("Testing if Configuration is passed to wrapped classes");
    assertTrue(after.get() instanceof Configurable);
    assertNotNull(((Configurable)after.get()).getConf());
  }

  public void testBazWritable() throws Exception {
    System.out.println("Testing for GenericWritable to find class names");
    FooGenericWritable generic = new FooGenericWritable();
    generic.setConf(conf);
    Baz baz = new Baz();
    generic.set(baz);
    TestWritable.testWritable(generic, conf);
  }

  public void testSet() throws Exception {
    Foo foo = new Foo();
    FooGenericWritable generic = new FooGenericWritable();
    // exception should not occur
    generic.set(foo);

    try {
      // exception should occur, since IntWritable is not registered
      generic = new FooGenericWritable();
      generic.set(new IntWritable(1));
      fail("Generic writable should have thrown an exception for a Writable not registered");
    } catch (RuntimeException e) {
      // ignore
    }
  }

  public void testGet() throws Exception {
    Foo foo = new Foo();
    FooGenericWritable generic = new FooGenericWritable();
    generic.set(foo);
    assertEquals(foo, generic.get());
  }

}
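The round-trips above work because GenericWritable's serialized form is a one-byte index into getTypes() followed by the wrapped instance's own bytes. A hedged sketch of that framing, written here only for illustration (the helper name is invented; the real class also instantiates the indexed type on read and passes the Configuration to it):

import java.io.DataOutput;
import java.io.IOException;

class GenericWritableFramingSketch {
  // Sketch: write the registered type's index, then the instance itself.
  static void writeWrapped(DataOutput out, Writable instance,
                           Class<? extends Writable>[] types) throws IOException {
    for (byte i = 0; i < types.length; i++) {
      if (types[i] == instance.getClass()) {
        out.writeByte(i);      // type index first
        instance.write(out);   // then the payload
        return;
      }
    }
    // mirrors the RuntimeException that testSet() expects for unregistered types
    throw new RuntimeException("unregistered type: " + instance.getClass());
  }
}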
115
src/test/org/apache/hadoop/io/TestMD5Hash.java
Normal file
@@ -0,0 +1,115 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import org.apache.hadoop.io.TestWritable;
import junit.framework.TestCase;
import java.security.MessageDigest;
import java.util.Random;

/** Unit tests for MD5Hash. */
public class TestMD5Hash extends TestCase {
  public TestMD5Hash(String name) { super(name); }

  private static final Random RANDOM = new Random();

  public static MD5Hash getTestHash() throws Exception {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    byte[] buffer = new byte[1024];
    RANDOM.nextBytes(buffer);
    digest.update(buffer);
    return new MD5Hash(digest.digest());
  }

  protected static byte[] D00 = new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  protected static byte[] DFF = new byte[] {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};

  public void testMD5Hash() throws Exception {
    MD5Hash md5Hash = getTestHash();

    final MD5Hash md5Hash00 = new MD5Hash(D00);

    final MD5Hash md5HashFF = new MD5Hash(DFF);

    MD5Hash orderedHash = new MD5Hash(new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,
                                                 13,14,15,16});
    MD5Hash backwardHash = new MD5Hash(new byte[]{-1,-2,-3,-4,-5,-6,-7,-8,
                                                  -9,-10,-11,-12,-13,-14,
                                                  -15,-16});
    MD5Hash closeHash1 = new MD5Hash(new byte[]{-1,0,0,0,0,0,0,0,
                                                0,0,0,0,0,0,0,0});
    MD5Hash closeHash2 = new MD5Hash(new byte[]{-1,1,0,0,0,0,0,0,
                                                0,0,0,0,0,0,0,0});

    // test i/o
    TestWritable.testWritable(md5Hash);
    TestWritable.testWritable(md5Hash00);
    TestWritable.testWritable(md5HashFF);

    // test equals()
    assertEquals(md5Hash, md5Hash);
    assertEquals(md5Hash00, md5Hash00);
    assertEquals(md5HashFF, md5HashFF);

    // test compareTo()
    assertTrue(md5Hash.compareTo(md5Hash) == 0);
    assertTrue(md5Hash00.compareTo(md5Hash) < 0);
    assertTrue(md5HashFF.compareTo(md5Hash) > 0);

    // test toString and string ctor
    assertEquals(md5Hash, new MD5Hash(md5Hash.toString()));
    assertEquals(md5Hash00, new MD5Hash(md5Hash00.toString()));
    assertEquals(md5HashFF, new MD5Hash(md5HashFF.toString()));

    assertEquals(0x01020304, orderedHash.quarterDigest());
    assertEquals(0xfffefdfc, backwardHash.quarterDigest());

    assertEquals(0x0102030405060708L, orderedHash.halfDigest());
    assertEquals(0xfffefdfcfbfaf9f8L, backwardHash.halfDigest());
    assertTrue("hash collision",
               closeHash1.hashCode() != closeHash2.hashCode());

    Thread t1 = new Thread() {
      public void run() {
        for (int i = 0; i < 100; i++) {
          MD5Hash hash = new MD5Hash(DFF);
          assertEquals(hash, md5HashFF);
        }
      }
    };

    Thread t2 = new Thread() {
      public void run() {
        for (int i = 0; i < 100; i++) {
          MD5Hash hash = new MD5Hash(D00);
          assertEquals(hash, md5Hash00);
        }
      }
    };

    t1.start();
    t2.start();
    t1.join();
    t2.join();
  }

}
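The quarterDigest/halfDigest assertions above pin down a big-endian packing of the leading digest bytes. A hedged sketch of that packing, for illustration only (the MD5Hash implementation itself is not part of this diff; the logic below is inferred from the assertions, not copied from it):

  // Assumes quarterDigest() packs the first four digest bytes big-endian,
  // which is what assertEquals(0x01020304, orderedHash.quarterDigest()) implies.
  static int quarterDigest(byte[] digest) {
    int value = 0;
    for (int i = 0; i < 4; i++) {
      value |= (digest[i] & 0xff) << (8 * (3 - i));
    }
    return value;  // {1,2,3,4,...} -> 0x01020304
  }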
124
src/test/org/apache/hadoop/io/TestMapFile.java
Normal file
@@ -0,0 +1,124 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import junit.framework.TestCase;

public class TestMapFile extends TestCase {
  private static Configuration conf = new Configuration();

  /**
   * Test getClosest feature.
   * @throws Exception
   */
  public void testGetClosest() throws Exception {
    // Write a mapfile of simple data: keys are zero-padded numbers as Text.
    Path dirName = new Path(System.getProperty("test.build.data",".") +
      getName() + ".mapfile");
    FileSystem fs = FileSystem.getLocal(conf);
    Path qualifiedDirName = fs.makeQualified(dirName);
    // Make an index entry for every third insertion.
    MapFile.Writer.setIndexInterval(conf, 3);
    MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), Text.class, Text.class);
    // Assert that the index interval is the value set above.
    assertEquals(3, writer.getIndexInterval());
    // Add entries up to 100 in intervals of ten.
    final int FIRST_KEY = 10;
    for (int i = FIRST_KEY; i < 100; i += 10) {
      String iStr = Integer.toString(i);
      Text t = new Text("00".substring(iStr.length()) + iStr);
      writer.append(t, t);
    }
    writer.close();
    // Now do getClosest on created mapfile.
    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
      conf);
    Text key = new Text("55");
    Text value = new Text();
    Text closest = (Text)reader.getClosest(key, value);
    // Assert that closest after 55 is 60
    assertEquals(new Text("60"), closest);
    // Get closest that falls before the passed key: 50
    closest = (Text)reader.getClosest(key, value, true);
    assertEquals(new Text("50"), closest);
    // Test get closest when we pass explicit key
    final Text TWENTY = new Text("20");
    closest = (Text)reader.getClosest(TWENTY, value);
    assertEquals(TWENTY, closest);
    closest = (Text)reader.getClosest(TWENTY, value, true);
    assertEquals(TWENTY, closest);
    // Test what happens at boundaries.  Assert if searching a key that is
    // less than first key in the mapfile, that the first key is returned.
    key = new Text("00");
    closest = (Text)reader.getClosest(key, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));

    // If we're looking for the first key before, and we pass in a key before
    // the first key in the file, we should get null
    closest = (Text)reader.getClosest(key, value, true);
    assertNull(closest);

    // Assert that null is returned if key is > last entry in mapfile.
    key = new Text("99");
    closest = (Text)reader.getClosest(key, value);
    assertNull(closest);

    // If we were looking for the key before, we should get the last key
    closest = (Text)reader.getClosest(key, value, true);
    assertEquals(new Text("90"), closest);
  }

  public void testMidKey() throws Exception {
    // Write a mapfile of simple data: keys are zero-padded numbers as Text.
    Path dirName = new Path(System.getProperty("test.build.data",".") +
      getName() + ".mapfile");
    FileSystem fs = FileSystem.getLocal(conf);
    Path qualifiedDirName = fs.makeQualified(dirName);

    MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(1), new IntWritable(1));
    writer.close();
    // Now do getClosest on created mapfile.
    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
      conf);
    assertEquals(new IntWritable(1), reader.midKey());
  }


  public void testMidKeyEmpty() throws Exception {
    // Write a mapfile of simple data: keys are zero-padded numbers as Text.
    Path dirName = new Path(System.getProperty("test.build.data",".") +
      getName() + ".mapfile");
    FileSystem fs = FileSystem.getLocal(conf);
    Path qualifiedDirName = fs.makeQualified(dirName);

    MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
    writer.close();
    // Now do getClosest on created mapfile.
    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
      conf);
    assertEquals(null, reader.midKey());
  }
}
132
src/test/org/apache/hadoop/io/TestMapWritable.java
Normal file
@@ -0,0 +1,132 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.Map;

import junit.framework.TestCase;

/**
 * Tests MapWritable
 */
public class TestMapWritable extends TestCase {
  /** the test */
  @SuppressWarnings("unchecked")
  public void testMapWritable() {
    Text[] keys = {
        new Text("key1"),
        new Text("key2"),
        new Text("Key3"),
    };

    BytesWritable[] values = {
        new BytesWritable("value1".getBytes()),
        new BytesWritable("value2".getBytes()),
        new BytesWritable("value3".getBytes())
    };

    MapWritable inMap = new MapWritable();
    for (int i = 0; i < keys.length; i++) {
      inMap.put(keys[i], values[i]);
    }

    MapWritable outMap = new MapWritable(inMap);
    assertEquals(inMap.size(), outMap.size());

    for (Map.Entry<Writable, Writable> e: inMap.entrySet()) {
      assertTrue(outMap.containsKey(e.getKey()));
      assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo(
          e.getValue()));
    }

    // Now for something a little harder...

    Text[] maps = {
        new Text("map1"),
        new Text("map2")
    };

    MapWritable mapOfMaps = new MapWritable();
    mapOfMaps.put(maps[0], inMap);
    mapOfMaps.put(maps[1], outMap);

    MapWritable copyOfMapOfMaps = new MapWritable(mapOfMaps);
    for (int i = 0; i < maps.length; i++) {
      assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
      MapWritable a = (MapWritable) mapOfMaps.get(maps[i]);
      MapWritable b = (MapWritable) copyOfMapOfMaps.get(maps[i]);
      assertEquals(a.size(), b.size());
      for (Writable key: a.keySet()) {
        assertTrue(b.containsKey(key));

        // This will work because we know what we put into each set

        WritableComparable aValue = (WritableComparable) a.get(key);
        WritableComparable bValue = (WritableComparable) b.get(key);
        assertEquals(0, aValue.compareTo(bValue));
      }
    }
  }

  /**
   * Test that number of "unknown" classes is propagated across multiple copies.
   */
  @SuppressWarnings("deprecation")
  public void testForeignClass() {
    MapWritable inMap = new MapWritable();
    inMap.put(new Text("key"), new UTF8("value"));
    inMap.put(new Text("key2"), new UTF8("value2"));
    MapWritable outMap = new MapWritable(inMap);
    MapWritable copyOfCopy = new MapWritable(outMap);
    assertEquals(1, copyOfCopy.getNewClasses());
  }

  /**
   * Assert MapWritable does not grow across calls to readFields.
   * @throws Exception
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-2244">HADOOP-2244</a>
   */
  public void testMultipleCallsToReadFieldsAreSafe() throws Exception {
    // Create an instance and add a key/value.
    MapWritable m = new MapWritable();
    final Text t = new Text(getName());
    m.put(t, t);
    // Get current size of map.  Key values are 't'.
    int count = m.size();
    // Now serialize... save off the bytes.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    m.write(dos);
    dos.close();
    // Now add new values to the MapWritable.
    m.put(new Text("key1"), new Text("value1"));
    m.put(new Text("key2"), new Text("value2"));
    // Now deserialize the original MapWritable.  Ensure count and key values
    // match original state.
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    DataInputStream dis = new DataInputStream(bais);
    m.readFields(dis);
    assertEquals(count, m.size());
    assertTrue(m.get(t).equals(t));
    dis.close();
  }
}
69
src/test/org/apache/hadoop/io/TestSequenceFileSerialization.java
Normal file
@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.SequenceFile.Writer;

public class TestSequenceFileSerialization extends TestCase {

  private Configuration conf;
  private FileSystem fs;

  @Override
  protected void setUp() throws Exception {
    conf = new Configuration();
    conf.set("io.serializations",
        "org.apache.hadoop.io.serializer.JavaSerialization");
    fs = FileSystem.getLocal(conf);
  }

  @Override
  protected void tearDown() throws Exception {
    fs.close();
  }

  public void testJavaSerialization() throws Exception {
    Path file = new Path(System.getProperty("test.build.data",".") +
        "/test.seq");

    fs.delete(file, true);
    Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
        String.class);

    writer.append(1L, "one");
    writer.append(2L, "two");

    writer.close();

    Reader reader = new Reader(fs, file, conf);
    assertEquals(1L, reader.next((Object) null));
    assertEquals("one", reader.getCurrentValue((Object) null));
    assertEquals(2L, reader.next((Object) null));
    assertEquals("two", reader.getCurrentValue((Object) null));
    assertNull(reader.next((Object) null));
    reader.close();

  }
}
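Note that setUp() above replaces the serializer list wholesale, which is why plain Long/String keys and values work here. In a configuration that also needs Writables, both serializations would typically be registered; a hedged sketch (assuming WritableSerialization is the usual default entry in this framework):

  Configuration conf = new Configuration();
  // Register both serializers so Writable types keep working alongside
  // java.io.Serializable types.
  conf.setStrings("io.serializations",
      "org.apache.hadoop.io.serializer.WritableSerialization",
      "org.apache.hadoop.io.serializer.JavaSerialization");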
157
src/test/org/apache/hadoop/io/TestSetFile.java
Normal file
@@ -0,0 +1,157 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.*;
import java.util.*;
import junit.framework.TestCase;

import org.apache.commons.logging.*;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;

/** Unit tests for SetFile, flat files of binary key/value pairs. */
public class TestSetFile extends TestCase {
  private static final Log LOG = LogFactory.getLog(TestSetFile.class);
  private static String FILE =
    System.getProperty("test.build.data",".") + "/test.set";

  private static Configuration conf = new Configuration();

  public TestSetFile(String name) { super(name); }

  public void testSetFile() throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);
    try {
      RandomDatum[] data = generate(10000);
      writeTest(fs, data, FILE, CompressionType.NONE);
      readTest(fs, data, FILE);

      writeTest(fs, data, FILE, CompressionType.BLOCK);
      readTest(fs, data, FILE);
    } finally {
      fs.close();
    }
  }

  private static RandomDatum[] generate(int count) {
    LOG.info("generating " + count + " records in memory");
    RandomDatum[] data = new RandomDatum[count];
    RandomDatum.Generator generator = new RandomDatum.Generator();
    for (int i = 0; i < count; i++) {
      generator.next();
      data[i] = generator.getValue();
    }
    LOG.info("sorting " + count + " records");
    Arrays.sort(data);
    return data;
  }

  private static void writeTest(FileSystem fs, RandomDatum[] data,
                                String file, CompressionType compress)
    throws IOException {
    MapFile.delete(fs, file);
    LOG.info("creating with " + data.length + " records");
    SetFile.Writer writer =
      new SetFile.Writer(conf, fs, file,
                         WritableComparator.get(RandomDatum.class),
                         compress);
    for (int i = 0; i < data.length; i++)
      writer.append(data[i]);
    writer.close();
  }

  private static void readTest(FileSystem fs, RandomDatum[] data, String file)
    throws IOException {
    RandomDatum v = new RandomDatum();
    int sample = (int)Math.sqrt(data.length);
    Random random = new Random();
    LOG.info("reading " + sample + " records");
    SetFile.Reader reader = new SetFile.Reader(fs, file, conf);
    for (int i = 0; i < sample; i++) {
      if (!reader.seek(data[random.nextInt(data.length)]))
        throw new RuntimeException("wrong value at " + i);
    }
    reader.close();
    LOG.info("done reading " + data.length);
  }


  /** For debugging and testing. */
  public static void main(String[] args) throws Exception {
    int count = 1024 * 1024;
    boolean create = true;
    boolean check = true;
    String file = FILE;
    String compress = "NONE";

    String usage = "Usage: TestSetFile [-count N] [-nocreate] [-nocheck] [-compress type] file";

    if (args.length == 0) {
      System.err.println(usage);
      System.exit(-1);
    }

    int i = 0;
    Path fpath = null;
    FileSystem fs = null;
    try {
      for (; i < args.length; i++) { // parse command line
        if (args[i] == null) {
          continue;
        } else if (args[i].equals("-count")) {
          count = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-nocreate")) {
          create = false;
        } else if (args[i].equals("-nocheck")) {
          check = false;
        } else if (args[i].equals("-compress")) {
          compress = args[++i];
        } else {
          // file is required parameter
          file = args[i];
          fpath = new Path(file);
        }
      }

      fs = fpath.getFileSystem(conf);

      LOG.info("count = " + count);
      LOG.info("create = " + create);
      LOG.info("check = " + check);
      LOG.info("compress = " + compress);
      LOG.info("file = " + file);

      RandomDatum[] data = generate(count);

      if (create) {
        writeTest(fs, data, file, CompressionType.valueOf(compress));
      }

      if (check) {
        readTest(fs, data, file);
      }

    } finally {
      fs.close();
    }
  }
}
102
src/test/org/apache/hadoop/io/TestSortedMapWritable.java
Normal file
@@ -0,0 +1,102 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io;

import java.util.Map;

import junit.framework.TestCase;

/**
 * Tests SortedMapWritable
 */
public class TestSortedMapWritable extends TestCase {
  /** the test */
  @SuppressWarnings("unchecked")
  public void testSortedMapWritable() {
    Text[] keys = {
        new Text("key1"),
        new Text("key2"),
        new Text("key3"),
    };

    BytesWritable[] values = {
        new BytesWritable("value1".getBytes()),
        new BytesWritable("value2".getBytes()),
        new BytesWritable("value3".getBytes())
    };

    SortedMapWritable inMap = new SortedMapWritable();
    for (int i = 0; i < keys.length; i++) {
      inMap.put(keys[i], values[i]);
    }

    assertEquals(0, inMap.firstKey().compareTo(keys[0]));
    assertEquals(0, inMap.lastKey().compareTo(keys[2]));

    SortedMapWritable outMap = new SortedMapWritable(inMap);
    assertEquals(inMap.size(), outMap.size());

    for (Map.Entry<WritableComparable, Writable> e: inMap.entrySet()) {
      assertTrue(outMap.containsKey(e.getKey()));
      assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo(
          e.getValue()));
    }

    // Now for something a little harder...

    Text[] maps = {
        new Text("map1"),
        new Text("map2")
    };

    SortedMapWritable mapOfMaps = new SortedMapWritable();
    mapOfMaps.put(maps[0], inMap);
    mapOfMaps.put(maps[1], outMap);

    SortedMapWritable copyOfMapOfMaps = new SortedMapWritable(mapOfMaps);
    for (int i = 0; i < maps.length; i++) {
      assertTrue(copyOfMapOfMaps.containsKey(maps[i]));

      SortedMapWritable a = (SortedMapWritable) mapOfMaps.get(maps[i]);
      SortedMapWritable b = (SortedMapWritable) copyOfMapOfMaps.get(maps[i]);
      assertEquals(a.size(), b.size());
      for (Writable key: a.keySet()) {
        assertTrue(b.containsKey(key));

        // This will work because we know what we put into each set

        WritableComparable aValue = (WritableComparable) a.get(key);
        WritableComparable bValue = (WritableComparable) b.get(key);
        assertEquals(0, aValue.compareTo(bValue));
      }
    }
  }

  /**
   * Test that number of "unknown" classes is propagated across multiple copies.
   */
  @SuppressWarnings("deprecation")
  public void testForeignClass() {
    SortedMapWritable inMap = new SortedMapWritable();
    inMap.put(new Text("key"), new UTF8("value"));
    inMap.put(new Text("key2"), new UTF8("value2"));
    SortedMapWritable outMap = new SortedMapWritable(inMap);
    SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
    assertEquals(1, copyOfCopy.getNewClasses());
  }
}
266
src/test/org/apache/hadoop/io/TestText.java
Normal file
@@ -0,0 +1,266 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import junit.framework.TestCase;

import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.Random;

/** Unit tests for Text. */
public class TestText extends TestCase {
  private static final int NUM_ITERATIONS = 100;
  public TestText(String name) { super(name); }

  private static final Random RANDOM = new Random(1);

  private static final int RAND_LEN = -1;

  // generate a valid java String
  private static String getTestString(int len) throws Exception {
    StringBuffer buffer = new StringBuffer();
    int length = (len == RAND_LEN) ? RANDOM.nextInt(1000) : len;
    while (buffer.length() < length) {
      int codePoint = RANDOM.nextInt(Character.MAX_CODE_POINT);
      char tmpStr[] = new char[2];
      if (Character.isDefined(codePoint)) {
        // skip surrogate code points, which would leave an unpaired surrogate
        if (codePoint < Character.MIN_SUPPLEMENTARY_CODE_POINT &&
            !Character.isHighSurrogate((char)codePoint) &&
            !Character.isLowSurrogate((char)codePoint)) {
          Character.toChars(codePoint, tmpStr, 0);
          buffer.append(tmpStr);
        }
      }
    }
    return buffer.toString();
  }

  public static String getTestString() throws Exception {
    return getTestString(RAND_LEN);
  }

  public static String getLongString() throws Exception {
    String str = getTestString();
    int length = Short.MAX_VALUE + str.length();
    StringBuffer buffer = new StringBuffer();
    while (buffer.length() < length)
      buffer.append(str);

    return buffer.toString();
  }

  public void testWritable() throws Exception {
    for (int i = 0; i < NUM_ITERATIONS; i++) {
      String str;
      if (i == 0)
        str = getLongString();
      else
        str = getTestString();
      TestWritable.testWritable(new Text(str));
    }
  }


  public void testCoding() throws Exception {
    String before = "Bad \t encoding \t testcase";
    Text text = new Text(before);
    String after = text.toString();
    assertTrue(before.equals(after));

    for (int i = 0; i < NUM_ITERATIONS; i++) {
      // generate a random string
      if (i == 0)
        before = getLongString();
      else
        before = getTestString();

      // test string to utf8
      ByteBuffer bb = Text.encode(before);

      byte[] utf8Text = bb.array();
      byte[] utf8Java = before.getBytes("UTF-8");
      assertEquals(0, WritableComparator.compareBytes(
          utf8Text, 0, bb.limit(),
          utf8Java, 0, utf8Java.length));

      // test utf8 to string
      after = Text.decode(utf8Java);
      assertTrue(before.equals(after));
    }
  }


  public void testIO() throws Exception {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();

    for (int i = 0; i < NUM_ITERATIONS; i++) {
      // generate a random string
      String before;
      if (i == 0)
        before = getLongString();
      else
        before = getTestString();

      // write it
      out.reset();
      Text.writeString(out, before);

      // test that it reads correctly
      in.reset(out.getData(), out.getLength());
      String after = Text.readString(in);
      assertTrue(before.equals(after));

      // Test compatibility with Java's other decoder
      int strLenSize = WritableUtils.getVIntSize(Text.utf8Length(before));
      String after2 = new String(out.getData(), strLenSize,
          out.getLength() - strLenSize, "UTF-8");
      assertTrue(before.equals(after2));
    }
  }

  public void testCompare() throws Exception {
    DataOutputBuffer out1 = new DataOutputBuffer();
    DataOutputBuffer out2 = new DataOutputBuffer();
    DataOutputBuffer out3 = new DataOutputBuffer();
    Text.Comparator comparator = new Text.Comparator();
    for (int i = 0; i < NUM_ITERATIONS; i++) {
      // reset output buffer
      out1.reset();
      out2.reset();
      out3.reset();

      // generate two random strings
      String str1;
      String str2;
      if (i == 0) {
        str1 = getLongString();
        str2 = getLongString();
      } else {
        str1 = getTestString();
        str2 = getTestString();
      }

      // convert to texts
      Text txt1 = new Text(str1);
      Text txt2 = new Text(str2);
      Text txt3 = new Text(str1);

      // serialize them
      txt1.write(out1);
      txt2.write(out2);
      txt3.write(out3);

      // compare two strings by looking at their binary formats
      int ret1 = comparator.compare(out1.getData(), 0, out1.getLength(),
                                    out2.getData(), 0, out2.getLength());
      // compare two strings
      int ret2 = txt1.compareTo(txt2);

      assertEquals(ret1, ret2);

      // test equal
      assertEquals(txt1.compareTo(txt3), 0);
      assertEquals(comparator.compare(out1.getData(), 0, out1.getLength(),
                                      out3.getData(), 0, out3.getLength()), 0);
    }
  }

  public void testFind() throws Exception {
    Text text = new Text("abcd\u20acbdcd\u20ac");
    assertTrue(text.find("abd") == -1);
    assertTrue(text.find("ac") == -1);
    assertTrue(text.find("\u20ac") == 4);
    assertTrue(text.find("\u20ac", 5) == 11);
  }

  public void testFindAfterUpdatingContents() throws Exception {
    Text text = new Text("abcd");
    text.set("a".getBytes());
    assertEquals(text.getLength(), 1);
    assertEquals(text.find("a"), 0);
    assertEquals(text.find("b"), -1);
  }

  public void testValidate() throws Exception {
    Text text = new Text("abcd\u20acbdcd\u20ac");
    byte [] utf8 = text.getBytes();
    int length = text.getLength();
    Text.validateUTF8(utf8, 0, length);
  }

  public void testTextText() throws CharacterCodingException {
    Text a = new Text("abc");
    Text b = new Text("a");
    b.set(a);
    assertEquals("abc", b.toString());
    a.append("xdefgxxx".getBytes(), 1, 4);
    assertEquals("modified aliased string", "abc", b.toString());
    assertEquals("appended string incorrectly", "abcdefg", a.toString());
  }

  private class ConcurrentEncodeDecodeThread extends Thread {
    public ConcurrentEncodeDecodeThread(String name) {
      super(name);
    }

    public void run() {
      String name = this.getName();
      DataOutputBuffer out = new DataOutputBuffer();
      DataInputBuffer in = new DataInputBuffer();
      for (int i = 0; i < 1000; ++i) {
        try {
          out.reset();
          WritableUtils.writeString(out, name);

          in.reset(out.getData(), out.getLength());
          String s = WritableUtils.readString(in);

          assertEquals(name, s);
        } catch (Exception ioe) {
          throw new RuntimeException(ioe);
        }
      }
    }
  }

  public void testConcurrentEncodeDecode() throws Exception {
    Thread thread1 = new ConcurrentEncodeDecodeThread("apache");
    Thread thread2 = new ConcurrentEncodeDecodeThread("hadoop");

    thread1.start();
    thread2.start();

    thread1.join();
    thread2.join();
  }

  public static void main(String[] args) throws Exception
  {
    TestText test = new TestText("main");
    test.testIO();
    test.testCompare();
    test.testCoding();
    test.testWritable();
    test.testFind();
    test.testValidate();
  }
}
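testIO() above relies on Text's serialized layout: a variable-length int holding the UTF-8 byte count, followed by the raw UTF-8 bytes. A hedged sketch of decoding that layout directly (the helper name is illustrative, not part of the commit):

  // Skips the vint length prefix and decodes the UTF-8 payload with
  // java.lang.String, mirroring the compatibility check in testIO().
  static String decodeSerializedText(byte[] data, int len) throws java.io.IOException {
    DataInputBuffer in = new DataInputBuffer();
    in.reset(data, len);
    int byteLen = WritableUtils.readVInt(in);        // UTF-8 byte count
    int prefix = WritableUtils.getVIntSize(byteLen); // bytes occupied by the vint
    return new String(data, prefix, byteLen, "UTF-8");
  }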
56
src/test/org/apache/hadoop/io/TestTextNonUTF8.java
Normal file
@@ -0,0 +1,56 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import junit.framework.TestCase;

import java.nio.charset.MalformedInputException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.*;
import java.util.Arrays;

/** Unit tests for handling non-UTF8 bytes in Text. */
public class TestTextNonUTF8 extends TestCase {
  private static final Log LOG = LogFactory.getLog(TestTextNonUTF8.class);

  public void testNonUTF8() throws Exception {
    // this is a non UTF8 byte array
    byte b[] = {-0x01, -0x01, -0x01, -0x01, -0x01, -0x01, -0x01};
    boolean nonUTF8 = false;
    Text t = new Text(b);
    try {
      Text.validateUTF8(b);
    } catch (MalformedInputException me) {
      nonUTF8 = true;
    }
    // asserting that the byte array is non utf8
    assertTrue(nonUTF8);
    byte ret[] = t.getBytes();
    // asserting that the byte arrays are the same once the Text
    // object is created.
    assertTrue(Arrays.equals(b, ret));
  }

  public static void main(String[] args) throws Exception
  {
    TestTextNonUTF8 test = new TestTextNonUTF8();
    test.testNonUTF8();
  }
}
86
src/test/org/apache/hadoop/io/TestUTF8.java
Normal file
@@ -0,0 +1,86 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import junit.framework.TestCase;
import java.util.Random;

/** Unit tests for UTF8. */
public class TestUTF8 extends TestCase {
  public TestUTF8(String name) { super(name); }

  private static final Random RANDOM = new Random();

  public static String getTestString() throws Exception {
    StringBuffer buffer = new StringBuffer();
    int length = RANDOM.nextInt(100);
    for (int i = 0; i < length; i++) {
      buffer.append((char)(RANDOM.nextInt(Character.MAX_VALUE)));
    }
    return buffer.toString();
  }

  public void testWritable() throws Exception {
    for (int i = 0; i < 10; i++) {
      TestWritable.testWritable(new UTF8(getTestString()));
    }
  }

  public void testGetBytes() throws Exception {
    for (int i = 0; i < 10; i++) {

      // generate a random string
      String before = getTestString();

      // check its utf8
      assertEquals(before, new String(UTF8.getBytes(before), "UTF-8"));
    }
  }

  public void testIO() throws Exception {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();

    for (int i = 0; i < 10; i++) {
      // generate a random string
      String before = getTestString();

      // write it
      out.reset();
      UTF8.writeString(out, before);

      // test that it reads correctly
      in.reset(out.getData(), out.getLength());
      String after = UTF8.readString(in);
      assertTrue(before.equals(after));

      // test that it reads correctly with DataInput
      in.reset(out.getData(), out.getLength());
      String after2 = in.readUTF();
      assertTrue(before.equals(after2));

      // test that it is compatible with Java's other decoder
      String after3 = new String(out.getData(), 2, out.getLength()-2, "UTF-8");
      assertTrue(before.equals(after3));

    }

  }

}
179
src/test/org/apache/hadoop/io/TestVersionedWritable.java
Normal file
@@ -0,0 +1,179 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.*;
import java.util.Random;
import junit.framework.TestCase;

/** Unit tests for VersionedWritable. */

public class TestVersionedWritable extends TestCase {

  public TestVersionedWritable(String name) { super(name); }


  /** Example class used in test cases below. */
  public static class SimpleVersionedWritable extends VersionedWritable {

    private static final Random RANDOM = new Random();
    int state = RANDOM.nextInt();


    private static byte VERSION = 1;
    public byte getVersion() {
      return VERSION;
    }


    public void write(DataOutput out) throws IOException {
      super.write(out); // version.
      out.writeInt(state);
    }

    public void readFields(DataInput in) throws IOException {
      super.readFields(in); // version
      this.state = in.readInt();
    }


    public static SimpleVersionedWritable read(DataInput in) throws IOException {
      SimpleVersionedWritable result = new SimpleVersionedWritable();
      result.readFields(in);
      return result;
    }


    /** Required by test code, below. */
    public boolean equals(Object o) {
      if (!(o instanceof SimpleVersionedWritable))
        return false;
      SimpleVersionedWritable other = (SimpleVersionedWritable)o;
      return this.state == other.state;
    }

  }


  public static class AdvancedVersionedWritable extends SimpleVersionedWritable {

    String shortTestString = "Now is the time for all good men to come to the aid of the Party";
    String longTestString = "Four score and twenty years ago. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah.";

    String compressableTestString =
      "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " +
      "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " +
      "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. ";

    SimpleVersionedWritable containedObject = new SimpleVersionedWritable();
    String[] testStringArray = {"The", "Quick", "Brown", "Fox", "Jumped", "Over", "The", "Lazy", "Dog"};

    public void write(DataOutput out) throws IOException {
      super.write(out);
      out.writeUTF(shortTestString);
      WritableUtils.writeString(out, longTestString);
      int comp = WritableUtils.writeCompressedString(out, compressableTestString);
      System.out.println("Compression is " + comp + "%");
      containedObject.write(out); // Warning if this is a recursive call, you need a null value.
      WritableUtils.writeStringArray(out, testStringArray);

    }


    public void readFields(DataInput in) throws IOException {
      super.readFields(in);
      shortTestString = in.readUTF();
      longTestString = WritableUtils.readString(in);
      compressableTestString = WritableUtils.readCompressedString(in);
      containedObject.readFields(in); // Warning if this is a recursive call, you need a null value.
      testStringArray = WritableUtils.readStringArray(in);
    }



    public boolean equals(Object o) {
      if (!super.equals(o)) { return false; }

      if (!shortTestString.equals(((AdvancedVersionedWritable)o).shortTestString)) { return false; }
      if (!longTestString.equals(((AdvancedVersionedWritable)o).longTestString)) { return false; }
      if (!compressableTestString.equals(((AdvancedVersionedWritable)o).compressableTestString)) { return false; }

      if (testStringArray.length != ((AdvancedVersionedWritable)o).testStringArray.length) { return false; }
      for (int i = 0; i < testStringArray.length; i++) {
        if (!testStringArray[i].equals(((AdvancedVersionedWritable)o).testStringArray[i])) {
          return false;
        }
      }

      if (!containedObject.equals(((AdvancedVersionedWritable)o).containedObject)) { return false; }

      return true;
    }



  }

  /* This one checks that version mismatch is thrown... */
  public static class SimpleVersionedWritableV2 extends SimpleVersionedWritable {
    static byte VERSION = 2;
    public byte getVersion() {
      return VERSION;
    }
  }


  /** Test 1: Check that SimpleVersionedWritable round-trips. */
  public void testSimpleVersionedWritable() throws Exception {
    TestWritable.testWritable(new SimpleVersionedWritable());
  }

  /** Test 2: Check that AdvancedVersionedWritable works (well, why wouldn't it!). */
  public void testAdvancedVersionedWritable() throws Exception {
    TestWritable.testWritable(new AdvancedVersionedWritable());
  }

  /** Test 3: Check that SimpleVersionedWritable throws an Exception. */
  public void testSimpleVersionedWritableMismatch() throws Exception {
    TestVersionedWritable.testVersionedWritable(new SimpleVersionedWritable(), new SimpleVersionedWritableV2());
  }




  /** Utility method for testing VersionedWritables. */
  public static void testVersionedWritable(Writable before, Writable after) throws Exception {
    DataOutputBuffer dob = new DataOutputBuffer();
    before.write(dob);

    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), dob.getLength());

    try {
      after.readFields(dib);
    } catch (VersionMismatchException vmme) {
      System.out.println("Good, we expected this:" + vmme);
      return;
    }

    throw new Exception("A Version Mismatch Didn't Happen!");
  }
}

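For reference, the mismatch these tests provoke comes from VersionedWritable writing its version byte before any fields and checking it on read. A hedged sketch of that mechanism (it mirrors the class's documented behavior; the class itself is not in this diff, and the sketch name is invented):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public abstract class VersionedSketch implements Writable {
  public abstract byte getVersion();

  public void write(DataOutput out) throws IOException {
    out.writeByte(getVersion());        // version tag precedes all fields
  }

  public void readFields(DataInput in) throws IOException {
    byte version = in.readByte();       // verify the tag before reading fields
    if (version != getVersion())
      throw new VersionMismatchException(getVersion(), version);
  }
}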
99
src/test/org/apache/hadoop/io/TestWritable.java
Normal file
@@ -0,0 +1,99 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.*;
import java.util.Random;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

import junit.framework.TestCase;

/** Unit tests for Writable. */
public class TestWritable extends TestCase {
  public TestWritable(String name) { super(name); }

  /** Example class used in test cases below. */
  public static class SimpleWritable implements Writable {
    private static final Random RANDOM = new Random();

    int state = RANDOM.nextInt();

    public void write(DataOutput out) throws IOException {
      out.writeInt(state);
    }

    public void readFields(DataInput in) throws IOException {
      this.state = in.readInt();
    }

    public static SimpleWritable read(DataInput in) throws IOException {
      SimpleWritable result = new SimpleWritable();
      result.readFields(in);
      return result;
    }

    /** Required by test code, below. */
    public boolean equals(Object o) {
      if (!(o instanceof SimpleWritable))
        return false;
      SimpleWritable other = (SimpleWritable)o;
      return this.state == other.state;
    }
  }

  /** Test 1: Check that SimpleWritable round-trips. */
  public void testSimpleWritable() throws Exception {
    testWritable(new SimpleWritable());
  }

  public void testByteWritable() throws Exception {
    testWritable(new ByteWritable((byte)128));
  }

  public void testDoubleWritable() throws Exception {
    testWritable(new DoubleWritable(1.0));
  }

  /** Utility method for testing writables. */
  public static Writable testWritable(Writable before)
    throws Exception {
    return testWritable(before, null);
  }

  /** Utility method for testing writables. */
  public static Writable testWritable(Writable before
      , Configuration conf) throws Exception {
    DataOutputBuffer dob = new DataOutputBuffer();
    before.write(dob);

    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), dob.getLength());

    Writable after = (Writable)ReflectionUtils.newInstance(
        before.getClass(), conf);
    after.readFields(dib);

    assertEquals(before, after);
    return after;
  }

}
107
src/test/org/apache/hadoop/io/TestWritableName.java
Normal file
@@ -0,0 +1,107 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.*;
import java.util.Random;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

import junit.framework.TestCase;

/** Unit tests for WritableName. */
public class TestWritableName extends TestCase {
  public TestWritableName(String name) {
    super(name);
  }

  /** Example class used in test cases below. */
  public static class SimpleWritable implements Writable {
    private static final Random RANDOM = new Random();

    int state = RANDOM.nextInt();

    public void write(DataOutput out) throws IOException {
      out.writeInt(state);
    }

    public void readFields(DataInput in) throws IOException {
      this.state = in.readInt();
    }

    public static SimpleWritable read(DataInput in) throws IOException {
      SimpleWritable result = new SimpleWritable();
      result.readFields(in);
      return result;
    }

    /** Required by test code, below. */
    public boolean equals(Object o) {
      if (!(o instanceof SimpleWritable))
        return false;
      SimpleWritable other = (SimpleWritable)o;
      return this.state == other.state;
    }
  }

  private static final String testName = "mystring";

  public void testGoodName() throws Exception {
    Configuration conf = new Configuration();
    Class<?> test = WritableName.getClass("long", conf);
    assertTrue(test != null);
  }

  public void testSetName() throws Exception {
    Configuration conf = new Configuration();
    WritableName.setName(SimpleWritable.class, testName);

    Class<?> test = WritableName.getClass(testName, conf);
    assertTrue(test.equals(SimpleWritable.class));
  }

  public void testAddName() throws Exception {
    Configuration conf = new Configuration();
    String altName = testName + ".alt";

    WritableName.addName(SimpleWritable.class, altName);

    Class<?> test = WritableName.getClass(altName, conf);
    assertTrue(test.equals(SimpleWritable.class));

    // check original name still works
    test = WritableName.getClass(testName, conf);
    assertTrue(test.equals(SimpleWritable.class));
  }

  public void testBadName() throws Exception {
    Configuration conf = new Configuration();
    try {
      Class<?> test = WritableName.getClass("unknown_junk", conf);
      assertTrue(false);
    } catch (IOException e) {
      assertTrue(e.getMessage().matches(".*unknown_junk.*"));
    }
  }

}
65
src/test/org/apache/hadoop/io/TestWritableUtils.java
Normal file
@ -0,0 +1,65 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import junit.framework.TestCase;

public class TestWritableUtils extends TestCase {
  private static final Log LOG = LogFactory.getLog(TestWritableUtils.class);

  public static void testValue(int val, int vintlen) throws IOException {
    DataOutputBuffer buf = new DataOutputBuffer();
    DataInputBuffer inbuf = new DataInputBuffer();
    WritableUtils.writeVInt(buf, val);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Value = " + val);
      BytesWritable printer = new BytesWritable();
      printer.set(buf.getData(), 0, buf.getLength());
      LOG.debug("Buffer = " + printer);
    }
    inbuf.reset(buf.getData(), 0, buf.getLength());
    assertEquals(val, WritableUtils.readVInt(inbuf));
    assertEquals(vintlen, buf.getLength());
    assertEquals(vintlen, WritableUtils.getVIntSize(val));
    assertEquals(vintlen, WritableUtils.decodeVIntSize(buf.getData()[0]));
  }

  public static void testVInt() throws Exception {
    testValue(12, 1);
    testValue(127, 1);
    testValue(-112, 1);
    testValue(-113, 2);
    testValue(-128, 2);
    testValue(128, 2);
    testValue(-129, 2);
    testValue(255, 2);
    testValue(-256, 2);
    testValue(256, 3);
    testValue(-257, 3);
    testValue(65535, 3);
    testValue(-65536, 3);
    testValue(65536, 4);
    testValue(-65537, 4);
  }
}
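The boundary values in testVInt() reflect Hadoop's variable-length integer encoding: values in [-112, 127] fit in a single byte, and everything else gets a marker byte (encoding sign and payload length) followed by 1 to 4 big-endian payload bytes. A minimal sketch, reusing only the WritableUtils and DataOutputBuffer calls already used above, that prints the encoded size at a few boundaries:

// Sketch (not part of this commit): encoded vint sizes at the
// boundaries exercised by testVInt() above.
DataOutputBuffer buf = new DataOutputBuffer();
for (int v : new int[] {127, 128, -112, -113, 65535, 65536}) {
  buf.reset();                               // reuse the buffer between values
  WritableUtils.writeVInt(buf, v);
  System.out.println(v + " -> " + buf.getLength() + " byte(s)");
}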
249
src/test/org/apache/hadoop/io/compress/TestCodec.java
Normal file
@ -0,0 +1,249 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.compress;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Random;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;

public class TestCodec extends TestCase {

  private static final Log LOG =
    LogFactory.getLog(TestCodec.class);

  private Configuration conf = new Configuration();
  private int count = 10000;
  private int seed = new Random().nextInt();

  public void testDefaultCodec() throws IOException {
    codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.DefaultCodec");
    codecTest(conf, seed, count, "org.apache.hadoop.io.compress.DefaultCodec");
  }

  public void testGzipCodec() throws IOException {
    codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
    codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
  }

  public void testBZip2Codec() throws IOException {
    codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
    codecTest(conf, seed, count, "org.apache.hadoop.io.compress.BZip2Codec");
  }

  private static void codecTest(Configuration conf, int seed, int count,
                                String codecClass)
    throws IOException {

    // Create the codec
    CompressionCodec codec = null;
    try {
      codec = (CompressionCodec)
        ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
    } catch (ClassNotFoundException cnfe) {
      throw new IOException("Illegal codec!");
    }
    LOG.info("Created a Codec object of type: " + codecClass);

    // Generate data
    DataOutputBuffer data = new DataOutputBuffer();
    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
    for (int i = 0; i < count; ++i) {
      generator.next();
      RandomDatum key = generator.getKey();
      RandomDatum value = generator.getValue();

      key.write(data);
      value.write(data);
    }
    DataInputBuffer originalData = new DataInputBuffer();
    DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
    originalData.reset(data.getData(), 0, data.getLength());

    LOG.info("Generated " + count + " records");

    // Compress data
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter =
      codec.createOutputStream(compressedDataBuffer);
    DataOutputStream deflateOut =
      new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(data.getData(), 0, data.getLength());
    deflateOut.flush();
    deflateFilter.finish();
    LOG.info("Finished compressing data");

    // De-compress data
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
                                 compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter =
      codec.createInputStream(deCompressedDataBuffer);
    DataInputStream inflateIn =
      new DataInputStream(new BufferedInputStream(inflateFilter));

    // Check
    for (int i = 0; i < count; ++i) {
      RandomDatum k1 = new RandomDatum();
      RandomDatum v1 = new RandomDatum();
      k1.readFields(originalIn);
      v1.readFields(originalIn);

      RandomDatum k2 = new RandomDatum();
      RandomDatum v2 = new RandomDatum();
      k2.readFields(inflateIn);
      v2.readFields(inflateIn);
    }
    LOG.info("SUCCESS! Completed checking " + count + " records");
  }

  public void testCodecPoolGzipReuse() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("hadoop.native.lib", true);
    if (!ZlibFactory.isNativeZlibLoaded(conf)) {
      LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
      return;
    }
    GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
    DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
    Compressor c1 = CodecPool.getCompressor(gzc);
    Compressor c2 = CodecPool.getCompressor(dfc);
    CodecPool.returnCompressor(c1);
    CodecPool.returnCompressor(c2);
    assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
  }

  public void testSequenceFileDefaultCodec() throws IOException, ClassNotFoundException,
      InstantiationException, IllegalAccessException {
    sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.DefaultCodec", 100);
    sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.DefaultCodec", 1000000);
  }

  public void testSequenceFileBZip2Codec() throws IOException, ClassNotFoundException,
      InstantiationException, IllegalAccessException {
    sequenceFileCodecTest(conf, 0, "org.apache.hadoop.io.compress.BZip2Codec", 100);
    sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.BZip2Codec", 100);
    sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.BZip2Codec", 1000000);
  }

  private static void sequenceFileCodecTest(Configuration conf, int lines,
                                            String codecClass, int blockSize)
    throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException {

    Path filePath = new Path("SequenceFileCodecTest." + codecClass);
    // Configuration
    conf.setInt("io.seqfile.compress.blocksize", blockSize);

    // Create the SequenceFile
    FileSystem fs = FileSystem.get(conf);
    LOG.info("Creating SequenceFile with codec \"" + codecClass + "\"");
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, filePath,
        Text.class, Text.class, CompressionType.BLOCK,
        (CompressionCodec)Class.forName(codecClass).newInstance());

    // Write some data
    LOG.info("Writing to SequenceFile...");
    for (int i = 0; i < lines; i++) {
      Text key = new Text("key" + i);
      Text value = new Text("value" + i);
      writer.append(key, value);
    }
    writer.close();

    // Read the data back and check
    LOG.info("Reading from the SequenceFile...");
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, filePath, conf);

    Writable key = (Writable)reader.getKeyClass().newInstance();
    Writable value = (Writable)reader.getValueClass().newInstance();

    int lc = 0;
    try {
      while (reader.next(key, value)) {
        assertEquals("key" + lc, key.toString());
        assertEquals("value" + lc, value.toString());
        lc++;
      }
    } finally {
      reader.close();
    }
    assertEquals(lines, lc);

    // Delete temporary files
    fs.delete(filePath, false);

    LOG.info("SUCCESS! Completed SequenceFileCodecTest with codec \"" + codecClass + "\"");
  }

  public static void main(String[] args) {
    int count = 10000;
    String codecClass = "org.apache.hadoop.io.compress.DefaultCodec";

    String usage = "TestCodec [-count N] [-codec <codec class>]";
    if (args.length == 0) {
      System.err.println(usage);
      System.exit(-1);
    }

    try {
      for (int i = 0; i < args.length; ++i) {       // parse command line
        if (args[i] == null) {
          continue;
        } else if (args[i].equals("-count")) {
          count = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-codec")) {
          codecClass = args[++i];
        }
      }

      Configuration conf = new Configuration();
      int seed = 0;
      codecTest(conf, seed, count, codecClass);
    } catch (Exception e) {
      System.err.println("Caught: " + e);
      e.printStackTrace();
    }
  }

  public TestCodec(String name) {
    super(name);
  }

}
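codecTest() above always follows the same four-step pattern: instantiate a codec, wrap an output buffer with createOutputStream(), finish() the compressed stream, then wrap the compressed bytes with createInputStream(). A condensed sketch of that pattern on a small payload, assuming only the classes already imported above (not part of this commit):

// Compress and decompress a short byte string with DefaultCodec.
DefaultCodec codec = new DefaultCodec();
codec.setConf(new Configuration());

byte[] payload = "hello codec".getBytes();
DataOutputBuffer compressed = new DataOutputBuffer();
CompressionOutputStream out = codec.createOutputStream(compressed);
out.write(payload);
out.finish();                                   // flush codec-internal state

DataInputBuffer inBuf = new DataInputBuffer();
inBuf.reset(compressed.getData(), 0, compressed.getLength());
CompressionInputStream in = codec.createInputStream(inBuf);
byte[] restored = new byte[payload.length];
in.read(restored, 0, restored.length);          // a robust reader would loop until
                                                // all bytes arrive; restored == payload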
151
src/test/org/apache/hadoop/io/compress/TestCodecFactory.java
Normal file
@ -0,0 +1,151 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.compress;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.*;

import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;

public class TestCodecFactory extends TestCase {

  private static class BaseCodec implements CompressionCodec {
    private Configuration conf;

    public void setConf(Configuration conf) {
      this.conf = conf;
    }

    public Configuration getConf() {
      return conf;
    }

    public CompressionOutputStream createOutputStream(OutputStream out)
      throws IOException {
      return null;
    }

    public Class<? extends Compressor> getCompressorType() {
      return null;
    }

    public Compressor createCompressor() {
      return null;
    }

    public CompressionInputStream createInputStream(InputStream in,
                                                    Decompressor decompressor)
      throws IOException {
      return null;
    }

    public CompressionInputStream createInputStream(InputStream in)
      throws IOException {
      return null;
    }

    public CompressionOutputStream createOutputStream(OutputStream out,
                                                      Compressor compressor)
      throws IOException {
      return null;
    }

    public Class<? extends Decompressor> getDecompressorType() {
      return null;
    }

    public Decompressor createDecompressor() {
      return null;
    }

    public String getDefaultExtension() {
      return ".base";
    }
  }

  private static class BarCodec extends BaseCodec {
    public String getDefaultExtension() {
      return "bar";
    }
  }

  private static class FooBarCodec extends BaseCodec {
    public String getDefaultExtension() {
      return ".foo.bar";
    }
  }

  private static class FooCodec extends BaseCodec {
    public String getDefaultExtension() {
      return ".foo";
    }
  }

  /**
   * Returns a factory for a given set of codecs.
   * @param classes the codec classes to include
   * @return a new factory
   */
  private static CompressionCodecFactory setClasses(Class[] classes) {
    Configuration conf = new Configuration();
    CompressionCodecFactory.setCodecClasses(conf, Arrays.asList(classes));
    return new CompressionCodecFactory(conf);
  }

  private static void checkCodec(String msg,
                                 Class expected, CompressionCodec actual) {
    assertEquals(msg + " unexpected codec found",
                 expected.getName(),
                 actual.getClass().getName());
  }

  public static void testFinding() {
    CompressionCodecFactory factory =
      new CompressionCodecFactory(new Configuration());
    CompressionCodec codec = factory.getCodec(new Path("/tmp/foo.bar"));
    assertEquals("default factory foo codec", null, codec);
    codec = factory.getCodec(new Path("/tmp/foo.gz"));
    checkCodec("default factory for .gz", GzipCodec.class, codec);
    codec = factory.getCodec(new Path("/tmp/foo.bz2"));
    checkCodec("default factory for .bz2", BZip2Codec.class, codec);
    factory = setClasses(new Class[0]);
    codec = factory.getCodec(new Path("/tmp/foo.bar"));
    assertEquals("empty codec bar codec", null, codec);
    codec = factory.getCodec(new Path("/tmp/foo.gz"));
    assertEquals("empty codec gz codec", null, codec);
    codec = factory.getCodec(new Path("/tmp/foo.bz2"));
    assertEquals("empty factory for .bz2", null, codec);
    factory = setClasses(new Class[]{BarCodec.class, FooCodec.class,
                                     FooBarCodec.class});
    codec = factory.getCodec(new Path("/tmp/.foo.bar.gz"));
    assertEquals("full factory gz codec", null, codec);
    codec = factory.getCodec(new Path("/tmp/foo.bz2"));
    assertEquals("full factory for .bz2", null, codec);
    codec = factory.getCodec(new Path("/tmp/foo.bar"));
    checkCodec("full factory bar codec", BarCodec.class, codec);
    codec = factory.getCodec(new Path("/tmp/foo/baz.foo.bar"));
    checkCodec("full factory foo bar codec", FooBarCodec.class, codec);
    codec = factory.getCodec(new Path("/tmp/foo.foo"));
    checkCodec("full factory foo codec", FooCodec.class, codec);
  }
}
170
src/test/org/apache/hadoop/io/retry/TestRetryProxy.java
Normal file
@ -0,0 +1,170 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.retry;

import static org.apache.hadoop.io.retry.RetryPolicies.RETRY_FOREVER;
import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_DONT_FAIL;
import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
import static org.apache.hadoop.io.retry.RetryPolicies.retryByException;
import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import junit.framework.TestCase;

import org.apache.hadoop.io.retry.UnreliableInterface.FatalException;
import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
import org.apache.hadoop.ipc.RemoteException;

public class TestRetryProxy extends TestCase {

  private UnreliableImplementation unreliableImpl;

  @Override
  protected void setUp() throws Exception {
    unreliableImpl = new UnreliableImplementation();
  }

  public void testTryOnceThenFail() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl, TRY_ONCE_THEN_FAIL);
    unreliable.alwaysSucceeds();
    try {
      unreliable.failsOnceThenSucceeds();
      fail("Should fail");
    } catch (UnreliableException e) {
      // expected
    }
  }

  public void testTryOnceDontFail() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl, TRY_ONCE_DONT_FAIL);
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    try {
      unreliable.failsOnceThenSucceedsWithReturnValue();
      fail("Should fail");
    } catch (UnreliableException e) {
      // expected
    }
  }

  public void testRetryForever() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    unreliable.failsTenTimesThenSucceeds();
  }

  public void testRetryUpToMaximumCountWithFixedSleep() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    try {
      unreliable.failsTenTimesThenSucceeds();
      fail("Should fail");
    } catch (UnreliableException e) {
      // expected
    }
  }

  public void testRetryUpToMaximumTimeWithFixedSleep() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    try {
      unreliable.failsTenTimesThenSucceeds();
      fail("Should fail");
    } catch (UnreliableException e) {
      // expected
    }
  }

  public void testRetryUpToMaximumCountWithProportionalSleep() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    try {
      unreliable.failsTenTimesThenSucceeds();
      fail("Should fail");
    } catch (UnreliableException e) {
      // expected
    }
  }

  public void testExponentialRetry() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          exponentialBackoffRetry(5, 1L, TimeUnit.NANOSECONDS));
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    try {
      unreliable.failsTenTimesThenSucceeds();
      fail("Should fail");
    } catch (UnreliableException e) {
      // expected
    }
  }

  public void testRetryByException() throws UnreliableException {
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
      Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);

    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryByException(RETRY_FOREVER, exceptionToPolicyMap));
    unreliable.failsOnceThenSucceeds();
    try {
      unreliable.alwaysFailsWithFatalException();
      fail("Should fail");
    } catch (FatalException e) {
      // expected
    }
  }

  public void testRetryByRemoteException() throws UnreliableException {
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
      Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);

    UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
          retryByRemoteException(RETRY_FOREVER, exceptionToPolicyMap));
    try {
      unreliable.alwaysFailsWithRemoteFatalException();
      fail("Should fail");
    } catch (RemoteException e) {
      // expected
    }
  }

}
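Each test above follows the same recipe: wrap the implementation in a dynamic proxy whose retry policy decides, after each thrown exception, whether to re-invoke the method. A hedged sketch of that recipe outside JUnit, reusing only the classes in this package (the policy parameters are illustrative):

// Retry failed calls up to 4 times, sleeping 100 ms between attempts.
UnreliableInterface unreliable = (UnreliableInterface)
  RetryProxy.create(UnreliableInterface.class,
                    new UnreliableImplementation(),
                    RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                        4, 100, TimeUnit.MILLISECONDS));
unreliable.failsOnceThenSucceeds();   // first call throws, the proxy retries,
                                      // and the second attempt succeeds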
60
src/test/org/apache/hadoop/io/retry/UnreliableImplementation.java
Normal file
@ -0,0 +1,60 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.retry;

import org.apache.hadoop.ipc.RemoteException;

public class UnreliableImplementation implements UnreliableInterface {

  private int failsOnceInvocationCount,
    failsOnceWithValueInvocationCount,
    failsTenTimesInvocationCount;

  public void alwaysSucceeds() {
    // do nothing
  }

  public void alwaysFailsWithFatalException() throws FatalException {
    throw new FatalException();
  }

  public void alwaysFailsWithRemoteFatalException() throws RemoteException {
    throw new RemoteException(FatalException.class.getName(), "Oops");
  }

  public void failsOnceThenSucceeds() throws UnreliableException {
    if (failsOnceInvocationCount++ == 0) {
      throw new UnreliableException();
    }
  }

  public boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException {
    if (failsOnceWithValueInvocationCount++ == 0) {
      throw new UnreliableException();
    }
    return true;
  }

  public void failsTenTimesThenSucceeds() throws UnreliableException {
    if (failsTenTimesInvocationCount++ < 10) {
      throw new UnreliableException();
    }
  }

}
42
src/test/org/apache/hadoop/io/retry/UnreliableInterface.java
Normal file
@ -0,0 +1,42 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.retry;

import org.apache.hadoop.ipc.RemoteException;

public interface UnreliableInterface {

  public static class UnreliableException extends Exception {
    // no body
  }

  public static class FatalException extends UnreliableException {
    // no body
  }

  void alwaysSucceeds() throws UnreliableException;

  void alwaysFailsWithFatalException() throws FatalException;
  void alwaysFailsWithRemoteFatalException() throws RemoteException;

  void failsOnceThenSucceeds() throws UnreliableException;
  boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException;

  void failsTenTimesThenSucceeds() throws UnreliableException;
}
95
src/test/org/apache/hadoop/io/serializer/TestWritableSerialization.java
Normal file
@ -0,0 +1,95 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.io.serializer;

import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_KEY;
import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_VALUE;
import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.TestGenericWritable.Baz;
import org.apache.hadoop.io.TestGenericWritable.FooGenericWritable;
import org.apache.hadoop.util.GenericsUtil;

public class TestWritableSerialization extends TestCase {

  private static final Configuration conf = new Configuration();

  static {
    conf.set("io.serializations",
             "org.apache.hadoop.io.serializer.WritableSerialization");
  }

  public void testWritableSerialization() throws Exception {
    Text before = new Text("test writable");
    testSerialization(conf, before);
  }

  public void testWritableConfigurable() throws Exception {
    // set the configuration parameter
    conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);

    // reuse TestGenericWritable inner classes to test
    // writables that also implement Configurable
    FooGenericWritable generic = new FooGenericWritable();
    generic.setConf(conf);
    Baz baz = new Baz();
    generic.set(baz);
    Baz result = testSerialization(conf, baz);
    assertNotNull(result.getConf());
  }

  /**
   * A utility that tests serialization/deserialization.
   * @param <K> the class of the item
   * @param conf configuration to use, "io.serializations" is read to
   *             determine the serialization
   * @param before item to (de)serialize
   * @return deserialized item
   */
  public static <K> K testSerialization(Configuration conf, K before)
    throws Exception {

    SerializationFactory factory = new SerializationFactory(conf);
    Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(before));
    Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(before));

    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    serializer.serialize(before);
    serializer.close();

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    deserializer.open(in);
    K after = deserializer.deserialize(null);
    deserializer.close();

    assertEquals(before, after);
    return after;
  }

}
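testSerialization() above is driven entirely by the io.serializations key: the SerializationFactory picks the registered Serialization that accepts the object's class, serializes into a buffer, and deserializes back. A direct-call sketch using only the classes above (nothing new):

// Round-trip a Text through WritableSerialization via the helper above.
Configuration conf = new Configuration();
conf.set("io.serializations",
         "org.apache.hadoop.io.serializer.WritableSerialization");
Text copy = TestWritableSerialization.testSerialization(conf, new Text("abc"));
// copy is a deserialized instance, equal to the original Text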
243
src/test/org/apache/hadoop/ipc/TestIPC.java
Normal file
@ -0,0 +1,243 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ipc;

import org.apache.commons.logging.*;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.NetUtils;

import java.util.Random;
import java.io.IOException;
import java.net.InetSocketAddress;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;

/** Unit tests for IPC. */
public class TestIPC extends TestCase {
  public static final Log LOG =
    LogFactory.getLog(TestIPC.class);

  final private static Configuration conf = new Configuration();
  final static private int PING_INTERVAL = 1000;

  static {
    Client.setPingInterval(conf, PING_INTERVAL);
  }

  public TestIPC(String name) { super(name); }

  private static final Random RANDOM = new Random();

  private static final String ADDRESS = "0.0.0.0";

  private static class TestServer extends Server {
    private boolean sleep;

    public TestServer(int handlerCount, boolean sleep)
      throws IOException {
      super(ADDRESS, 0, LongWritable.class, handlerCount, conf);
      this.sleep = sleep;
    }

    @Override
    public Writable call(Class<?> protocol, Writable param, long receiveTime)
      throws IOException {
      if (sleep) {
        try {
          Thread.sleep(RANDOM.nextInt(2 * PING_INTERVAL));   // sleep a bit
        } catch (InterruptedException e) {}
      }
      return param;                                          // echo param as result
    }
  }

  private static class SerialCaller extends Thread {
    private Client client;
    private InetSocketAddress server;
    private int count;
    private boolean failed;

    public SerialCaller(Client client, InetSocketAddress server, int count) {
      this.client = client;
      this.server = server;
      this.count = count;
    }

    public void run() {
      for (int i = 0; i < count; i++) {
        try {
          LongWritable param = new LongWritable(RANDOM.nextLong());
          LongWritable value =
            (LongWritable)client.call(param, server);
          if (!param.equals(value)) {
            LOG.fatal("Call failed!");
            failed = true;
            break;
          }
        } catch (Exception e) {
          LOG.fatal("Caught: " + StringUtils.stringifyException(e));
          failed = true;
        }
      }
    }
  }

  private static class ParallelCaller extends Thread {
    private Client client;
    private int count;
    private InetSocketAddress[] addresses;
    private boolean failed;

    public ParallelCaller(Client client, InetSocketAddress[] addresses,
                          int count) {
      this.client = client;
      this.addresses = addresses;
      this.count = count;
    }

    public void run() {
      for (int i = 0; i < count; i++) {
        try {
          Writable[] params = new Writable[addresses.length];
          for (int j = 0; j < addresses.length; j++)
            params[j] = new LongWritable(RANDOM.nextLong());
          Writable[] values = client.call(params, addresses);
          for (int j = 0; j < addresses.length; j++) {
            if (!params[j].equals(values[j])) {
              LOG.fatal("Call failed!");
              failed = true;
              break;
            }
          }
        } catch (Exception e) {
          LOG.fatal("Caught: " + StringUtils.stringifyException(e));
          failed = true;
        }
      }
    }
  }

  public void testSerial() throws Exception {
    testSerial(3, false, 2, 5, 100);
  }

  public void testSerial(int handlerCount, boolean handlerSleep,
                         int clientCount, int callerCount, int callCount)
    throws Exception {
    Server server = new TestServer(handlerCount, handlerSleep);
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();

    Client[] clients = new Client[clientCount];
    for (int i = 0; i < clientCount; i++) {
      clients[i] = new Client(LongWritable.class, conf);
    }

    SerialCaller[] callers = new SerialCaller[callerCount];
    for (int i = 0; i < callerCount; i++) {
      callers[i] = new SerialCaller(clients[i % clientCount], addr, callCount);
      callers[i].start();
    }
    for (int i = 0; i < callerCount; i++) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
    for (int i = 0; i < clientCount; i++) {
      clients[i].stop();
    }
    server.stop();
  }

  public void testParallel() throws Exception {
    testParallel(10, false, 2, 4, 2, 4, 100);
  }

  public void testParallel(int handlerCount, boolean handlerSleep,
                           int serverCount, int addressCount,
                           int clientCount, int callerCount, int callCount)
    throws Exception {
    Server[] servers = new Server[serverCount];
    for (int i = 0; i < serverCount; i++) {
      servers[i] = new TestServer(handlerCount, handlerSleep);
      servers[i].start();
    }

    InetSocketAddress[] addresses = new InetSocketAddress[addressCount];
    for (int i = 0; i < addressCount; i++) {
      addresses[i] = NetUtils.getConnectAddress(servers[i % serverCount]);
    }

    Client[] clients = new Client[clientCount];
    for (int i = 0; i < clientCount; i++) {
      clients[i] = new Client(LongWritable.class, conf);
    }

    ParallelCaller[] callers = new ParallelCaller[callerCount];
    for (int i = 0; i < callerCount; i++) {
      callers[i] =
        new ParallelCaller(clients[i % clientCount], addresses, callCount);
      callers[i].start();
    }
    for (int i = 0; i < callerCount; i++) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
    for (int i = 0; i < clientCount; i++) {
      clients[i].stop();
    }
    for (int i = 0; i < serverCount; i++) {
      servers[i].stop();
    }
  }

  public void testStandAloneClient() throws Exception {
    testParallel(10, false, 2, 4, 2, 4, 100);
    Client client = new Client(LongWritable.class, conf);
    InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
    try {
      client.call(new LongWritable(RANDOM.nextLong()),
                  address);
      fail("Expected an exception to have been thrown");
    } catch (IOException e) {
      String message = e.getMessage();
      String addressText = address.toString();
      assertTrue("Did not find " + addressText + " in " + message,
                 message.contains(addressText));
      Throwable cause = e.getCause();
      assertNotNull("No nested exception in " + e, cause);
      String causeText = cause.getMessage();
      assertTrue("Did not find " + causeText + " in " + message,
                 message.contains(causeText));
    }
  }

  public static void main(String[] args) throws Exception {

    //new TestIPC("test").testSerial(5, false, 2, 10, 1000);

    new TestIPC("test").testParallel(10, false, 2, 4, 2, 4, 1000);

  }

}
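Both caller classes above reduce to the same primitive: Client.call(param, address) sends one Writable and blocks until the server echoes the result back. A minimal single-call sketch against the echo server defined in this test (no new APIs; assumes a context that can throw Exception):

// One server, one client, one echoed LongWritable.
Server server = new TestServer(1, false);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();

Client client = new Client(LongWritable.class, conf);
LongWritable param = new LongWritable(42);
LongWritable value = (LongWritable) client.call(param, addr);   // value.get() == 42
client.stop();
server.stop();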
150
src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java
Normal file
@ -0,0 +1,150 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ipc;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Random;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.net.NetUtils;

/**
 * This test provokes partial writes in the server, which is
 * serving multiple clients.
 */
public class TestIPCServerResponder extends TestCase {

  public static final Log LOG =
    LogFactory.getLog(TestIPCServerResponder.class);

  private static Configuration conf = new Configuration();

  public TestIPCServerResponder(final String name) {
    super(name);
  }

  private static final Random RANDOM = new Random();

  private static final String ADDRESS = "0.0.0.0";

  private static final int BYTE_COUNT = 1024;
  private static final byte[] BYTES = new byte[BYTE_COUNT];
  static {
    for (int i = 0; i < BYTE_COUNT; i++)
      BYTES[i] = (byte) ('a' + (i % 26));
  }

  private static class TestServer extends Server {

    private boolean sleep;

    public TestServer(final int handlerCount, final boolean sleep)
      throws IOException {
      super(ADDRESS, 0, BytesWritable.class, handlerCount, conf);
      // Set the buffer size to half of the maximum parameter/result size
      // to force the socket to block
      this.setSocketSendBufSize(BYTE_COUNT / 2);
      this.sleep = sleep;
    }

    @Override
    public Writable call(Class<?> protocol, Writable param, long receiveTime)
      throws IOException {
      if (sleep) {
        try {
          Thread.sleep(RANDOM.nextInt(20));   // sleep a bit
        } catch (InterruptedException e) {}
      }
      return param;
    }
  }

  private static class Caller extends Thread {

    private Client client;
    private int count;
    private InetSocketAddress address;
    private boolean failed;

    public Caller(final Client client, final InetSocketAddress address,
                  final int count) {
      this.client = client;
      this.address = address;
      this.count = count;
    }

    @Override
    public void run() {
      for (int i = 0; i < count; i++) {
        try {
          int byteSize = RANDOM.nextInt(BYTE_COUNT);
          byte[] bytes = new byte[byteSize];
          System.arraycopy(BYTES, 0, bytes, 0, byteSize);
          Writable param = new BytesWritable(bytes);
          Writable value = client.call(param, address);
          Thread.sleep(RANDOM.nextInt(20));
        } catch (Exception e) {
          LOG.fatal("Caught: " + e);
          failed = true;
        }
      }
    }
  }

  public void testServerResponder() throws Exception {
    testServerResponder(10, true, 1, 10, 200);
  }

  public void testServerResponder(final int handlerCount,
                                  final boolean handlerSleep,
                                  final int clientCount,
                                  final int callerCount,
                                  final int callCount) throws Exception {
    Server server = new TestServer(handlerCount, handlerSleep);
    server.start();

    InetSocketAddress address = NetUtils.getConnectAddress(server);
    Client[] clients = new Client[clientCount];
    for (int i = 0; i < clientCount; i++) {
      clients[i] = new Client(BytesWritable.class, conf);
    }

    Caller[] callers = new Caller[callerCount];
    for (int i = 0; i < callerCount; i++) {
      callers[i] = new Caller(clients[i % clientCount], address, callCount);
      callers[i].start();
    }
    for (int i = 0; i < callerCount; i++) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
    for (int i = 0; i < clientCount; i++) {
      clients[i].stop();
    }
    server.stop();
  }

}
391
src/test/org/apache/hadoop/ipc/TestRPC.java
Normal file
@ -0,0 +1,391 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ipc;

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.lang.reflect.Method;

import junit.framework.TestCase;

import java.util.Arrays;

import org.apache.commons.logging.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ConfiguredPolicy;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

/** Unit tests for RPC. */
public class TestRPC extends TestCase {
  private static final String ADDRESS = "0.0.0.0";

  public static final Log LOG =
    LogFactory.getLog(TestRPC.class);

  private static Configuration conf = new Configuration();

  int datasize = 1024 * 100;
  int numThreads = 50;

  public TestRPC(String name) { super(name); }

  public interface TestProtocol extends VersionedProtocol {
    public static final long versionID = 1L;

    void ping() throws IOException;
    void slowPing(boolean shouldSlow) throws IOException;
    String echo(String value) throws IOException;
    String[] echo(String[] value) throws IOException;
    Writable echo(Writable value) throws IOException;
    int add(int v1, int v2) throws IOException;
    int add(int[] values) throws IOException;
    int error() throws IOException;
    void testServerGet() throws IOException;
    int[] exchange(int[] values) throws IOException;
  }

  public class TestImpl implements TestProtocol {
    int fastPingCounter = 0;

    public long getProtocolVersion(String protocol, long clientVersion) {
      return TestProtocol.versionID;
    }

    public void ping() {}

    public synchronized void slowPing(boolean shouldSlow) {
      if (shouldSlow) {
        while (fastPingCounter < 2) {
          try {
            wait();   // slow response until two fast pings happened
          } catch (InterruptedException ignored) {}
        }
        fastPingCounter -= 2;
      } else {
        fastPingCounter++;
        notify();
      }
    }

    public String echo(String value) throws IOException { return value; }

    public String[] echo(String[] values) throws IOException { return values; }

    public Writable echo(Writable writable) {
      return writable;
    }

    public int add(int v1, int v2) {
      return v1 + v2;
    }

    public int add(int[] values) {
      int sum = 0;
      for (int i = 0; i < values.length; i++) {
        sum += values[i];
      }
      return sum;
    }

    public int error() throws IOException {
      throw new IOException("bobo");
    }

    public void testServerGet() throws IOException {
      if (!(Server.get() instanceof RPC.Server)) {
        throw new IOException("Server.get() failed");
      }
    }

    public int[] exchange(int[] values) {
      for (int i = 0; i < values.length; i++) {
        values[i] = i;
      }
      return values;
    }
  }

  //
  // an object that does a bunch of transactions
  //
  static class Transactions implements Runnable {
    int datasize;
    TestProtocol proxy;

    Transactions(TestProtocol proxy, int datasize) {
      this.proxy = proxy;
      this.datasize = datasize;
    }

    // do two RPCs that transfer data
    public void run() {
      int[] indata = new int[datasize];
      int[] outdata = null;
      int val = 0;
      try {
        outdata = proxy.exchange(indata);
        val = proxy.add(1, 2);
      } catch (IOException e) {
        assertTrue("Exception from RPC exchange() " + e, false);
      }
      assertEquals(indata.length, outdata.length);
      assertEquals(val, 3);
      for (int i = 0; i < outdata.length; i++) {
        assertEquals(outdata[i], i);
      }
    }
  }

  //
  // A class that does an RPC but does not read its response.
  //
  static class SlowRPC implements Runnable {
    private TestProtocol proxy;
    private volatile boolean done;

    SlowRPC(TestProtocol proxy) {
      this.proxy = proxy;
      done = false;
    }

    boolean isDone() {
      return done;
    }

    public void run() {
      try {
        proxy.slowPing(true);   // this would hang until two fast pings happened
        done = true;
      } catch (IOException e) {
        assertTrue("SlowRPC ping exception " + e, false);
      }
    }
  }

  public void testSlowRpc() throws Exception {
    System.out.println("Testing Slow RPC");
    // create a server with two handlers
    Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 2, false, conf);
    TestProtocol proxy = null;

    try {
      server.start();

      InetSocketAddress addr = NetUtils.getConnectAddress(server);

      // create a client
      proxy = (TestProtocol)RPC.getProxy(
          TestProtocol.class, TestProtocol.versionID, addr, conf);

      SlowRPC slowrpc = new SlowRPC(proxy);
      Thread thread = new Thread(slowrpc, "SlowRPC");
      thread.start();   // send a slow RPC, which won't return until two fast pings
      assertTrue("Slow RPC should not have finished1.", !slowrpc.isDone());

      proxy.slowPing(false);   // first fast ping

      // verify that the first RPC is still stuck
      assertTrue("Slow RPC should not have finished2.", !slowrpc.isDone());

      proxy.slowPing(false);   // second fast ping

      // Now the slow ping should be able to be executed
      while (!slowrpc.isDone()) {
        System.out.println("Waiting for slow RPC to get done.");
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {}
      }
    } finally {
      server.stop();
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      System.out.println("Down slow rpc testing");
    }
  }

  public void testCalls() throws Exception {
    Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, conf);
    TestProtocol proxy = null;
    try {
      server.start();

      InetSocketAddress addr = NetUtils.getConnectAddress(server);
      proxy = (TestProtocol)RPC.getProxy(
          TestProtocol.class, TestProtocol.versionID, addr, conf);

      proxy.ping();

      String stringResult = proxy.echo("foo");
      assertEquals(stringResult, "foo");

      stringResult = proxy.echo((String)null);
      assertEquals(stringResult, null);

      String[] stringResults = proxy.echo(new String[]{"foo", "bar"});
      assertTrue(Arrays.equals(stringResults, new String[]{"foo", "bar"}));

      stringResults = proxy.echo((String[])null);
      assertTrue(Arrays.equals(stringResults, null));

      UTF8 utf8Result = (UTF8)proxy.echo(new UTF8("hello world"));
      assertEquals(utf8Result, new UTF8("hello world"));

      utf8Result = (UTF8)proxy.echo((UTF8)null);
      assertEquals(utf8Result, null);

      int intResult = proxy.add(1, 2);
      assertEquals(intResult, 3);

      intResult = proxy.add(new int[] {1, 2});
      assertEquals(intResult, 3);

      boolean caught = false;
      try {
        proxy.error();
      } catch (IOException e) {
        LOG.debug("Caught " + e);
        caught = true;
      }
      assertTrue(caught);

      proxy.testServerGet();

      // create multiple threads and make them do large data transfers
      System.out.println("Starting multi-threaded RPC test...");
      server.setSocketSendBufSize(1024);
      Thread threadId[] = new Thread[numThreads];
|
||||
for (int i = 0; i < numThreads; i++) {
|
||||
Transactions trans = new Transactions(proxy, datasize);
|
||||
threadId[i] = new Thread(trans, "TransactionThread-" + i);
|
||||
threadId[i].start();
|
||||
}
|
||||
|
||||
// wait for all transactions to get over
|
||||
System.out.println("Waiting for all threads to finish RPCs...");
|
||||
for (int i = 0; i < numThreads; i++) {
|
||||
try {
|
||||
threadId[i].join();
|
||||
} catch (InterruptedException e) {
|
||||
i--; // retry
|
||||
}
|
||||
}
|
||||
|
||||
// try some multi-calls
|
||||
Method echo =
|
||||
TestProtocol.class.getMethod("echo", new Class[] { String.class });
|
||||
String[] strings = (String[])RPC.call(echo, new String[][]{{"a"},{"b"}},
|
||||
new InetSocketAddress[] {addr, addr}, conf);
|
||||
assertTrue(Arrays.equals(strings, new String[]{"a","b"}));
|
||||
|
||||
Method ping = TestProtocol.class.getMethod("ping", new Class[] {});
|
||||
Object[] voids = (Object[])RPC.call(ping, new Object[][]{{},{}},
|
||||
new InetSocketAddress[] {addr, addr}, conf);
|
||||
assertEquals(voids, null);
|
||||
} finally {
|
||||
server.stop();
|
||||
if(proxy!=null) RPC.stopProxy(proxy);
|
||||
}
|
||||
}
|
||||
|
||||
public void testStandaloneClient() throws IOException {
|
||||
try {
|
||||
RPC.waitForProxy(TestProtocol.class,
|
||||
TestProtocol.versionID, new InetSocketAddress(ADDRESS, 20), conf, 15000L);
|
||||
fail("We should not have reached here");
|
||||
} catch (ConnectException ioe) {
|
||||
//this is what we expected
|
||||
}
|
||||
}
|
||||
|
||||
private static final String ACL_CONFIG = "test.protocol.acl";
|
||||
|
||||
private static class TestPolicyProvider extends PolicyProvider {
|
||||
|
||||
@Override
|
||||
public Service[] getServices() {
|
||||
return new Service[] { new Service(ACL_CONFIG, TestProtocol.class) };
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
|
||||
SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));
|
||||
|
||||
Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);
|
||||
|
||||
TestProtocol proxy = null;
|
||||
|
||||
server.start();
|
||||
|
||||
InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
try {
|
||||
proxy = (TestProtocol)RPC.getProxy(
|
||||
TestProtocol.class, TestProtocol.versionID, addr, conf);
|
||||
proxy.ping();
|
||||
|
||||
if (expectFailure) {
|
||||
fail("Expect RPC.getProxy to fail with AuthorizationException!");
|
||||
}
|
||||
} catch (RemoteException e) {
|
||||
if (expectFailure) {
|
||||
assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
} finally {
|
||||
server.stop();
|
||||
if (proxy != null) {
|
||||
RPC.stopProxy(proxy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testAuthorization() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setBoolean(
|
||||
ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
|
||||
|
||||
// Expect to succeed
|
||||
conf.set(ACL_CONFIG, "*");
|
||||
doRPCs(conf, false);
|
||||
|
||||
// Reset authorization to expect failure
|
||||
conf.set(ACL_CONFIG, "invalid invalid");
|
||||
doRPCs(conf, true);
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
||||
new TestRPC("test").testCalls();
|
||||
|
||||
}
|
||||
}
|
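Editor's note: the tests above exercise Hadoop's VersionedProtocol RPC stack end to end. Below is a minimal, self-contained sketch (not part of this commit) of the same client/server wiring, using only the RPC.getServer/RPC.getProxy overloads and the NetUtils helper that TestRPC itself uses; the EchoExample/EchoProtocol names are hypothetical.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.net.NetUtils;

public class EchoExample {

  public interface EchoProtocol extends VersionedProtocol {
    long versionID = 1L;
    String echo(String value) throws IOException;
  }

  public static class EchoImpl implements EchoProtocol {
    public long getProtocolVersion(String protocol, long clientVersion) {
      return versionID;
    }
    public String echo(String value) { return value; }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Bind an RPC server for the implementation on an ephemeral port.
    Server server = RPC.getServer(new EchoImpl(), "0.0.0.0", 0, conf);
    server.start();
    InetSocketAddress addr = NetUtils.getConnectAddress(server);

    // Obtain a client-side proxy for the protocol and make a call.
    EchoProtocol proxy = (EchoProtocol) RPC.getProxy(
        EchoProtocol.class, EchoProtocol.versionID, addr, conf);
    try {
      System.out.println(proxy.echo("hello"));   // prints "hello"
    } finally {
      RPC.stopProxy(proxy);
      server.stop();
    }
  }
}

As in testCalls() above, the proxy must be released with RPC.stopProxy() and the server shut down with stop() when done.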
78
src/test/org/apache/hadoop/log/TestLogLevel.java
Normal file
@@ -0,0 +1,78 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.log;

import java.io.*;
import java.net.*;

import org.apache.hadoop.http.HttpServer;

import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.commons.logging.impl.*;
import org.apache.log4j.*;

public class TestLogLevel extends TestCase {
  static final PrintStream out = System.out;

  public void testDynamicLogLevel() throws Exception {
    String logName = TestLogLevel.class.getName();
    Log testlog = LogFactory.getLog(logName);

    // only test Log4JLogger
    if (testlog instanceof Log4JLogger) {
      Logger log = ((Log4JLogger)testlog).getLogger();
      log.debug("log.debug1");
      log.info("log.info1");
      log.error("log.error1");
      assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));

      HttpServer server = new HttpServer("..", "localhost", 22222, true);
      server.start();
      int port = server.getPort();

      // servlet
      URL url = new URL("http://localhost:" + port
          + "/logLevel?log=" + logName + "&level=" + Level.ERROR);
      out.println("*** Connecting to " + url);
      URLConnection connection = url.openConnection();
      connection.connect();

      BufferedReader in = new BufferedReader(new InputStreamReader(
          connection.getInputStream()));
      for(String line; (line = in.readLine()) != null; out.println(line));
      in.close();

      log.debug("log.debug2");
      log.info("log.info2");
      log.error("log.error2");
      assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));

      // command line
      String[] args = {"-setlevel", "localhost:"+port, logName, ""+Level.DEBUG};
      LogLevel.main(args);
      log.debug("log.debug3");
      log.info("log.info3");
      log.error("log.error3");
      assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
    }
    else {
      out.println(testlog.getClass() + " not tested.");
    }
  }
}
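Editor's note: a minimal client-side sketch (not part of this commit) of the servlet contract the test above exercises: an HTTP GET of /logLevel?log=NAME&level=LEVEL against the daemon's HttpServer changes the named logger's level. The class name and the host/port parameters are hypothetical; the URL format and response handling mirror the test.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;

public class LogLevelClientSketch {
  // 'host' and 'port' are assumed to point at a running Hadoop HttpServer.
  static void setLevel(String host, int port, String logName, String level)
      throws Exception {
    URL url = new URL("http://" + host + ":" + port
        + "/logLevel?log=" + logName + "&level=" + level);
    URLConnection conn = url.openConnection();
    conn.connect();

    // The servlet reports the outcome in its response body.
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()));
    for (String line; (line = in.readLine()) != null; ) {
      System.out.println(line);
    }
    in.close();
  }
}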
110
src/test/org/apache/hadoop/metrics/TestMetricsServlet.java
Normal file
@@ -0,0 +1,110 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.metrics;

import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import junit.framework.TestCase;

import org.apache.hadoop.metrics.MetricsServlet.TagsMetricsPair;
import org.apache.hadoop.metrics.spi.NoEmitMetricsContext;
import org.apache.hadoop.metrics.spi.OutputRecord;
import org.mortbay.util.ajax.JSON;

public class TestMetricsServlet extends TestCase {
  MetricsContext nc1;
  MetricsContext nc2;
  // List containing nc1 and nc2.
  List<MetricsContext> contexts;
  OutputRecord outputRecord;

  /**
   * Initializes, for testing, two NoEmitMetricsContext's, and adds one value
   * to the first of them.
   */
  public void setUp() throws IOException {
    nc1 = new NoEmitMetricsContext();
    nc1.init("test1", ContextFactory.getFactory());
    nc2 = new NoEmitMetricsContext();
    nc2.init("test2", ContextFactory.getFactory());
    contexts = new ArrayList<MetricsContext>();
    contexts.add(nc1);
    contexts.add(nc2);

    MetricsRecord r = nc1.createRecord("testRecord");

    r.setTag("testTag1", "testTagValue1");
    r.setTag("testTag2", "testTagValue2");
    r.setMetric("testMetric1", 1);
    r.setMetric("testMetric2", 33);
    r.update();

    Map<String, Collection<OutputRecord>> m = nc1.getAllRecords();
    assertEquals(1, m.size());
    assertEquals(1, m.values().size());
    Collection<OutputRecord> outputRecords = m.values().iterator().next();
    assertEquals(1, outputRecords.size());
    outputRecord = outputRecords.iterator().next();
  }

  public void testTagsMetricsPair() throws IOException {
    TagsMetricsPair pair = new TagsMetricsPair(outputRecord.getTagsCopy(),
        outputRecord.getMetricsCopy());
    String s = JSON.toString(pair);
    assertEquals(
        "[{\"testTag1\":\"testTagValue1\",\"testTag2\":\"testTagValue2\"},"+
        "{\"testMetric1\":1,\"testMetric2\":33}]", s);
  }

  public void testGetMap() throws IOException {
    MetricsServlet servlet = new MetricsServlet();
    Map<String, Map<String, List<TagsMetricsPair>>> m = servlet.makeMap(contexts);
    assertEquals("Map missing contexts", 2, m.size());
    assertTrue(m.containsKey("test1"));

    Map<String, List<TagsMetricsPair>> m2 = m.get("test1");

    assertEquals("Missing records", 1, m2.size());
    assertTrue(m2.containsKey("testRecord"));
    assertEquals("Wrong number of tags-values pairs.", 1, m2.get("testRecord").size());
  }

  public void testPrintMap() throws IOException {
    StringWriter sw = new StringWriter();
    PrintWriter out = new PrintWriter(sw);
    MetricsServlet servlet = new MetricsServlet();
    servlet.printMap(out, servlet.makeMap(contexts));

    String EXPECTED = "" +
      "test1\n" +
      "  testRecord\n" +
      "    {testTag1=testTagValue1,testTag2=testTagValue2}:\n" +
      "      testMetric1=1\n" +
      "      testMetric2=33\n" +
      "test2\n";
    assertEquals(EXPECTED, sw.toString());
  }
}
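Editor's note (not part of this commit): testTagsMetricsPair() pins down the servlet's JSON contract. Each record is rendered as a two-element array, tag map first, metric map second, exactly as the assertion above spells out:

  [{"testTag1":"testTagValue1","testTag2":"testTagValue2"},
   {"testMetric1":1,"testMetric2":33}]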
38
src/test/org/apache/hadoop/metrics/spi/TestOutputRecord.java
Normal file
@@ -0,0 +1,38 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.metrics.spi;

import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;

import junit.framework.TestCase;

public class TestOutputRecord extends TestCase {
  public void testCopy() {
    TagMap tags = new TagMap();
    tags.put("tagkey", "tagval");
    MetricMap metrics = new MetricMap();
    metrics.put("metrickey", 123.4);
    OutputRecord r = new OutputRecord(tags, metrics);

    assertEquals(tags, r.getTagsCopy());
    assertNotSame(tags, r.getTagsCopy());
    assertEquals(metrics, r.getMetricsCopy());
    assertNotSame(metrics, r.getMetricsCopy());
  }
}
62
src/test/org/apache/hadoop/net/StaticMapping.java
Normal file
@@ -0,0 +1,62 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

/**
 * Implements the {@link DNSToSwitchMapping} via static mappings. Used
 * in testcases that simulate racks.
 *
 */
public class StaticMapping extends Configured implements DNSToSwitchMapping {
  public void setconf(Configuration conf) {
    String[] mappings = conf.getStrings("hadoop.configured.node.mapping");
    if (mappings != null) {
      for (int i = 0; i < mappings.length; i++) {
        String str = mappings[i];
        String host = str.substring(0, str.indexOf('='));
        String rack = str.substring(str.indexOf('=') + 1);
        addNodeToRack(host, rack);
      }
    }
  }
  /* Only one instance per JVM */
  private static Map<String, String> nameToRackMap = new HashMap<String, String>();

  static synchronized public void addNodeToRack(String name, String rackId) {
    nameToRackMap.put(name, rackId);
  }
  public List<String> resolve(List<String> names) {
    List<String> m = new ArrayList<String>();
    synchronized (nameToRackMap) {
      for (String name : names) {
        String rackId;
        if ((rackId = nameToRackMap.get(name)) != null) {
          m.add(rackId);
        } else {
          m.add(NetworkTopology.DEFAULT_RACK);
        }
      }
      return m;
    }
  }
}
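Editor's note: a short usage sketch (not part of this commit) for StaticMapping, using only the config key and public methods defined above; the host and rack names are made up.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.StaticMapping;

public class StaticMappingExample {
  public static void main(String[] args) {
    StaticMapping mapping = new StaticMapping();
    Configuration conf = new Configuration();
    // host=rack pairs, comma-separated, as parsed by setconf() above.
    conf.set("hadoop.configured.node.mapping", "host1=/rack1,host2=/rack2");
    mapping.setconf(conf);

    // Mapped hosts resolve to their racks; unknown hosts fall back to
    // NetworkTopology.DEFAULT_RACK.
    List<String> racks = mapping.resolve(Arrays.asList("host1", "host2", "host3"));
    System.out.println(racks);   // e.g. [/rack1, /rack2, /default-rack]
  }
}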
150
src/test/org/apache/hadoop/net/TestDNS.java
Normal file
@@ -0,0 +1,150 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


package org.apache.hadoop.net;

import junit.framework.TestCase;

import java.net.UnknownHostException;
import java.net.InetAddress;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import javax.naming.NameNotFoundException;

/**
 *
 */
public class TestDNS extends TestCase {

  private static final Log LOG = LogFactory.getLog(TestDNS.class);
  private static final String DEFAULT = "default";

  /**
   * Constructs a test case with the given name.
   *
   * @param name test name
   */
  public TestDNS(String name) {
    super(name);
  }

  /**
   * Test that asking for the default hostname works
   * @throws Exception if hostname lookups fail
   */
  public void testGetLocalHost() throws Exception {
    String hostname = DNS.getDefaultHost(DEFAULT);
    assertNotNull(hostname);
  }

  /**
   * Test that repeated calls to getting the local host are fairly fast, and
   * hence that caching is being used
   * @throws Exception if hostname lookups fail
   */
  public void testGetLocalHostIsFast() throws Exception {
    String hostname = DNS.getDefaultHost(DEFAULT);
    assertNotNull(hostname);
    long t1 = System.currentTimeMillis();
    String hostname2 = DNS.getDefaultHost(DEFAULT);
    long t2 = System.currentTimeMillis();
    String hostname3 = DNS.getDefaultHost(DEFAULT);
    long t3 = System.currentTimeMillis();
    assertEquals(hostname3, hostname2);
    assertEquals(hostname2, hostname);
    long interval2 = t3 - t2;
    assertTrue(
        "It is taking too long to determine the local host - caching is not working",
        interval2 < 20000);
  }

  /**
   * Test that our local IP address is not null
   * @throws Exception if something went wrong
   */
  public void testLocalHostHasAnAddress() throws Exception {
    assertNotNull(getLocalIPAddr());
  }

  private InetAddress getLocalIPAddr() throws UnknownHostException {
    String hostname = DNS.getDefaultHost(DEFAULT);
    InetAddress localhost = InetAddress.getByName(hostname);
    return localhost;
  }

  /**
   * Test that passing a null interface name
   * fails with a NullPointerException
   * @throws Exception if something went wrong
   */
  public void testNullInterface() throws Exception {
    try {
      String host = DNS.getDefaultHost(null);
      fail("Expected a NullPointerException, got " + host);
    } catch (NullPointerException expected) {
      // this is expected
    }
  }

  /**
   * Get the IP addresses of an unknown interface, expect to get something
   * back
   * @throws Exception if something went wrong
   */
  public void testIPsOfUnknownInterface() throws Exception {
    String[] ips = DNS.getIPs("name-of-an-unknown-interface");
    assertNotNull(ips);
    assertTrue(ips.length > 0);
  }

  /**
   * TestCase: get our local address and reverse look it up
   * @throws Exception if that fails
   */
  public void testRDNS() throws Exception {
    InetAddress localhost = getLocalIPAddr();
    try {
      String s = DNS.reverseDns(localhost, null);
      LOG.info("Local reverse DNS hostname is " + s);
    } catch (NameNotFoundException e) {
      if (!localhost.isLinkLocalAddress() || localhost.isLoopbackAddress()) {
        // these addresses probably won't work with rDNS anyway, unless someone
        // has unusual entries in their DNS server mapping 1.0.0.127 to localhost
        LOG.info("Reverse DNS failing due to incomplete networking", e);
        LOG.info("Address is " + localhost
                 + " Loopback=" + localhost.isLoopbackAddress()
                 + " Linklocal=" + localhost.isLinkLocalAddress());
      }

    }
  }

  /**
   * Test that the name "localhost" resolves to something.
   *
   * If this fails, your machine's network is in a mess, go edit /etc/hosts
   * @throws Exception for any problems
   */
  public void testLocalhostResolves() throws Exception {
    InetAddress localhost = InetAddress.getByName("localhost");
    assertNotNull("localhost is null", localhost);
    LOG.info("Localhost IPAddr is " + localhost.toString());
  }
}
46
src/test/org/apache/hadoop/net/TestScriptBasedMapping.java
Normal file
@@ -0,0 +1,46 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;

import junit.framework.TestCase;

public class TestScriptBasedMapping extends TestCase {

  public void testNoArgsMeansNoResult() {
    ScriptBasedMapping mapping = new ScriptBasedMapping();

    Configuration conf = new Configuration();
    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
        ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");

    mapping.setConf(conf);

    List<String> names = new ArrayList<String>();
    names.add("some.machine.name");
    names.add("other.machine.name");

    List<String> result = mapping.resolve(names);
    assertNull(result);
  }
}
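Editor's note: a hypothetical configuration sketch (not part of this commit) showing the same two keys used with a valid argument count. The script path and class name are made up, and the keys are assumed to be visible in-package, as they are in the test above.

package org.apache.hadoop.net;

import org.apache.hadoop.conf.Configuration;

public class ScriptMappingConfigSketch {
  public static ScriptBasedMapping configured() {
    ScriptBasedMapping mapping = new ScriptBasedMapping();
    Configuration conf = new Configuration();
    // Topology script that maps host names/IPs to rack paths (hypothetical path).
    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "/etc/hadoop/topology.sh");
    // Argument-count setting checked against MIN_ALLOWABLE_ARGS; below the
    // minimum, resolve() returns null, as the test above demonstrates.
    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
        ScriptBasedMapping.MIN_ALLOWABLE_ARGS);
    mapping.setConf(conf);
    return mapping;
  }
}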
155
src/test/org/apache/hadoop/net/TestSocketIOWithTimeout.java
Normal file
@@ -0,0 +1,155 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.SocketTimeoutException;
import java.nio.channels.Pipe;
import java.util.Arrays;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import junit.framework.TestCase;

/**
 * This tests timeouts from SocketInputStream and
 * SocketOutputStream using pipes.
 *
 * Normal read and write using these streams are tested by pretty much
 * every DFS unit test.
 */
public class TestSocketIOWithTimeout extends TestCase {

  static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);

  private static int TIMEOUT = 1*1000;
  private static String TEST_STRING = "1234567890";

  private void doIO(InputStream in, OutputStream out) throws IOException {
    /* Keep on writing or reading until we get SocketTimeoutException.
     * It expects this exception to occur within 200 millis of TIMEOUT.
     */
    byte buf[] = new byte[4192];

    while (true) {
      long start = System.currentTimeMillis();
      try {
        if (in != null) {
          in.read(buf);
        } else {
          out.write(buf);
        }
      } catch (SocketTimeoutException e) {
        long diff = System.currentTimeMillis() - start;
        LOG.info("Got SocketTimeoutException as expected after " +
                 diff + " millis : " + e.getMessage());
        assertTrue(Math.abs(TIMEOUT - diff) <= 200);
        break;
      }
    }
  }

  /**
   * Just reads one byte from the input stream.
   */
  static class ReadRunnable implements Runnable {
    private InputStream in;

    public ReadRunnable(InputStream in) {
      this.in = in;
    }
    public void run() {
      try {
        in.read();
      } catch (IOException e) {
        LOG.info("Got exception while reading as expected : " +
                 e.getMessage());
        return;
      }
      fail("Read should have thrown an exception");
    }
  }

  public void testSocketIOWithTimeout() throws IOException {

    // first open pipe:
    Pipe pipe = Pipe.open();
    Pipe.SourceChannel source = pipe.source();
    Pipe.SinkChannel sink = pipe.sink();

    try {
      InputStream in = new SocketInputStream(source, TIMEOUT);
      OutputStream out = new SocketOutputStream(sink, TIMEOUT);

      byte[] writeBytes = TEST_STRING.getBytes();
      byte[] readBytes = new byte[writeBytes.length];

      out.write(writeBytes);
      doIO(null, out);

      in.read(readBytes);
      assertTrue(Arrays.equals(writeBytes, readBytes));
      doIO(in, null);

      /*
       * Verify that it handles interrupted threads properly.
       * Use a large timeout and expect the thread to return quickly.
       */
      in = new SocketInputStream(source, 0);
      Thread thread = new Thread(new ReadRunnable(in));
      thread.start();

      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}

      thread.interrupt();

      try {
        thread.join();
      } catch (InterruptedException e) {
        throw new IOException("Unexpected InterruptedException : " + e);
      }

      // make sure the channels are still open
      assertTrue(source.isOpen());
      assertTrue(sink.isOpen());

      out.close();
      assertFalse(sink.isOpen());

      // close sink and expect -1 from source.read()
      assertEquals(-1, in.read());

      // make sure close() closes the underlying channel.
      in.close();
      assertFalse(source.isOpen());

    } finally {
      if (source != null) {
        source.close();
      }
      if (sink != null) {
        sink.close();
      }
    }
  }
}
120
src/test/org/apache/hadoop/record/FromCpp.java
Normal file
@@ -0,0 +1,120 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.record;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.TreeMap;
import junit.framework.*;

/**
 */
public class FromCpp extends TestCase {

  public FromCpp(String testName) {
    super(testName);
  }

  protected void setUp() throws Exception {
  }

  protected void tearDown() throws Exception {
  }

  public void testBinary() {
    File tmpfile;
    try {
      tmpfile = new File("/temp/hadooptmp.dat");
      RecRecord1 r1 = new RecRecord1();
      r1.setBoolVal(true);
      r1.setByteVal((byte)0x66);
      r1.setFloatVal(3.145F);
      r1.setDoubleVal(1.5234);
      r1.setIntVal(4567);
      r1.setLongVal(0x5a5a5a5a5a5aL);
      r1.setStringVal("random text");
      r1.setBufferVal(new Buffer());
      r1.setVectorVal(new ArrayList<String>());
      r1.setMapVal(new TreeMap<String,String>());
      FileInputStream istream = new FileInputStream(tmpfile);
      BinaryRecordInput in = new BinaryRecordInput(istream);
      RecRecord1 r2 = new RecRecord1();
      r2.deserialize(in, "");
      istream.close();
      assertTrue(r1.equals(r2));
    } catch (IOException ex) {
      ex.printStackTrace();
    }
  }

  public void testCsv() {
    File tmpfile;
    try {
      tmpfile = new File("/temp/hadooptmp.txt");
      RecRecord1 r1 = new RecRecord1();
      r1.setBoolVal(true);
      r1.setByteVal((byte)0x66);
      r1.setFloatVal(3.145F);
      r1.setDoubleVal(1.5234);
      r1.setIntVal(4567);
      r1.setLongVal(0x5a5a5a5a5a5aL);
      r1.setStringVal("random text");
      r1.setBufferVal(new Buffer());
      r1.setVectorVal(new ArrayList<String>());
      r1.setMapVal(new TreeMap<String,String>());
      FileInputStream istream = new FileInputStream(tmpfile);
      CsvRecordInput in = new CsvRecordInput(istream);
      RecRecord1 r2 = new RecRecord1();
      r2.deserialize(in, "");
      istream.close();
      assertTrue(r1.equals(r2));
    } catch (IOException ex) {
      ex.printStackTrace();
    }
  }

  public void testXml() {
    File tmpfile;
    try {
      tmpfile = new File("/temp/hadooptmp.xml");
      RecRecord1 r1 = new RecRecord1();
      r1.setBoolVal(true);
      r1.setByteVal((byte)0x66);
      r1.setFloatVal(3.145F);
      r1.setDoubleVal(1.5234);
      r1.setIntVal(4567);
      r1.setLongVal(0x5a5a5a5a5a5aL);
      r1.setStringVal("random text");
      r1.setBufferVal(new Buffer());
      r1.setVectorVal(new ArrayList<String>());
      r1.setMapVal(new TreeMap<String,String>());
      FileInputStream istream = new FileInputStream(tmpfile);
      XmlRecordInput in = new XmlRecordInput(istream);
      RecRecord1 r2 = new RecRecord1();
      r2.deserialize(in, "");
      istream.close();
      assertTrue(r1.equals(r2));
    } catch (IOException ex) {
      ex.printStackTrace();
    }
  }

}
313
src/test/org/apache/hadoop/record/RecordBench.java
Normal file
@@ -0,0 +1,313 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.record;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.lang.reflect.Array;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Random;

/**
 * Benchmark for various types of serializations
 */
public class RecordBench {

  private static class Times {
    long init;
    long serialize;
    long deserialize;
    long write;
    long readFields;
  }

  private static final long SEED = 0xDEADBEEFL;
  private static final Random rand = new Random();

  /** Do not allow creating a new instance of RecordBench */
  private RecordBench() {}

  private static void initBuffers(Record[] buffers) {
    final int BUFLEN = 32;
    for (int idx = 0; idx < buffers.length; idx++) {
      buffers[idx] = new RecBuffer();
      int buflen = rand.nextInt(BUFLEN);
      byte[] bytes = new byte[buflen];
      rand.nextBytes(bytes);
      ((RecBuffer)buffers[idx]).setData(new Buffer(bytes));
    }
  }

  private static void initStrings(Record[] strings) {
    final int STRLEN = 32;
    for (int idx = 0; idx < strings.length; idx++) {
      strings[idx] = new RecString();
      int strlen = rand.nextInt(STRLEN);
      StringBuilder sb = new StringBuilder(strlen);
      for (int ich = 0; ich < strlen; ich++) {
        int cpt = 0;
        while (true) {
          cpt = rand.nextInt(0x10FFFF+1);
          if (Utils.isValidCodePoint(cpt)) {
            break;
          }
        }
        sb.appendCodePoint(cpt);
      }
      ((RecString)strings[idx]).setData(sb.toString());
    }
  }

  private static void initInts(Record[] ints) {
    for (int idx = 0; idx < ints.length; idx++) {
      ints[idx] = new RecInt();
      ((RecInt)ints[idx]).setData(rand.nextInt());
    }
  }

  private static Record[] makeArray(String type, int numRecords, Times times) {
    Method init = null;
    try {
      init = RecordBench.class.getDeclaredMethod("init"+
                                                 toCamelCase(type) + "s",
                                                 new Class[] {Record[].class});
    } catch (NoSuchMethodException ex) {
      throw new RuntimeException(ex);
    }

    Record[] records = new Record[numRecords];
    times.init = System.nanoTime();
    try {
      init.invoke(null, new Object[]{records});
    } catch (Exception ex) {
      throw new RuntimeException(ex);
    }
    times.init = System.nanoTime() - times.init;
    return records;
  }

  private static void runBinaryBench(String type, int numRecords, Times times)
    throws IOException {
    Record[] records = makeArray(type, numRecords, times);
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    BinaryRecordOutput rout = new BinaryRecordOutput(bout);
    DataOutputStream dout = new DataOutputStream(bout);

    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].serialize(rout);
    }
    bout.reset();

    times.serialize = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].serialize(rout);
    }
    times.serialize = System.nanoTime() - times.serialize;

    byte[] serialized = bout.toByteArray();
    ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
    BinaryRecordInput rin = new BinaryRecordInput(bin);

    times.deserialize = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].deserialize(rin);
    }
    times.deserialize = System.nanoTime() - times.deserialize;

    bout.reset();

    times.write = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].write(dout);
    }
    times.write = System.nanoTime() - times.write;

    bin.reset();
    DataInputStream din = new DataInputStream(bin);

    times.readFields = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].readFields(din);
    }
    times.readFields = System.nanoTime() - times.readFields;
  }

  private static void runCsvBench(String type, int numRecords, Times times)
    throws IOException {
    Record[] records = makeArray(type, numRecords, times);
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    CsvRecordOutput rout = new CsvRecordOutput(bout);

    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].serialize(rout);
    }
    bout.reset();

    times.serialize = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].serialize(rout);
    }
    times.serialize = System.nanoTime() - times.serialize;

    byte[] serialized = bout.toByteArray();
    ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
    CsvRecordInput rin = new CsvRecordInput(bin);

    times.deserialize = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].deserialize(rin);
    }
    times.deserialize = System.nanoTime() - times.deserialize;
  }

  private static void runXmlBench(String type, int numRecords, Times times)
    throws IOException {
    Record[] records = makeArray(type, numRecords, times);
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    XmlRecordOutput rout = new XmlRecordOutput(bout);

    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].serialize(rout);
    }
    bout.reset();

    bout.write("<records>\n".getBytes());

    times.serialize = System.nanoTime();
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].serialize(rout);
    }
    times.serialize = System.nanoTime() - times.serialize;

    bout.write("</records>\n".getBytes());

    byte[] serialized = bout.toByteArray();
    ByteArrayInputStream bin = new ByteArrayInputStream(serialized);

    times.deserialize = System.nanoTime();
    XmlRecordInput rin = new XmlRecordInput(bin);
    for(int idx = 0; idx < numRecords; idx++) {
      records[idx].deserialize(rin);
    }
    times.deserialize = System.nanoTime() - times.deserialize;
  }

  private static void printTimes(String type,
                                 String format,
                                 int numRecords,
                                 Times times) {
    System.out.println("Type: " + type + " Format: " + format +
                       " #Records: "+numRecords);
    if (times.init != 0) {
      System.out.println("Initialization Time (Per record) : "+
                         times.init/numRecords + " Nanoseconds");
    }

    if (times.serialize != 0) {
      System.out.println("Serialization Time (Per Record) : "+
                         times.serialize/numRecords + " Nanoseconds");
    }

    if (times.deserialize != 0) {
      System.out.println("Deserialization Time (Per Record) : "+
                         times.deserialize/numRecords + " Nanoseconds");
    }

    if (times.write != 0) {
      System.out.println("Write Time (Per Record) : "+
                         times.write/numRecords + " Nanoseconds");
    }

    if (times.readFields != 0) {
      System.out.println("ReadFields Time (Per Record) : "+
                         times.readFields/numRecords + " Nanoseconds");
    }

    System.out.println();
  }

  private static String toCamelCase(String inp) {
    char firstChar = inp.charAt(0);
    if (Character.isLowerCase(firstChar)) {
      return ""+Character.toUpperCase(firstChar) + inp.substring(1);
    }
    return inp;
  }

  private static void exitOnError() {
    String usage = "RecordBench {buffer|string|int}"+
      " {binary|csv|xml} <numRecords>";
    System.out.println(usage);
    System.exit(1);
  }

  /**
   * @param args the command line arguments
   */
  public static void main(String[] args) throws IOException {
    String version = "RecordBench v0.1";
    System.out.println(version+"\n");

    if (args.length != 3) {
      exitOnError();
    }

    String typeName = args[0];
    String format = args[1];
    int numRecords = Integer.decode(args[2]).intValue();

    Method bench = null;
    try {
      bench = RecordBench.class.getDeclaredMethod("run"+
                                                  toCamelCase(format) + "Bench",
                                                  new Class[] {String.class, Integer.TYPE, Times.class});
    } catch (NoSuchMethodException ex) {
      ex.printStackTrace();
      exitOnError();
    }

    if (numRecords < 0) {
      exitOnError();
    }

    // dry run
    rand.setSeed(SEED);
    Times times = new Times();
    try {
      bench.invoke(null, new Object[] {typeName, numRecords, times});
    } catch (Exception ex) {
      ex.printStackTrace();
      System.exit(1);
    }

    // timed run
    rand.setSeed(SEED);
    try {
      bench.invoke(null, new Object[] {typeName, numRecords, times});
    } catch (Exception ex) {
      ex.printStackTrace();
      System.exit(1);
    }
    printTimes(typeName, format, numRecords, times);
  }
}
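Editor's note: a hypothetical invocation sketch (not part of this commit). RecordBench dispatches reflectively: the format argument "binary" selects runBinaryBench() and the type argument "int" selects initInts(), via toCamelCase(), matching the usage string printed by exitOnError() above.

public class RecordBenchDemo {
  public static void main(String[] args) throws java.io.IOException {
    // Equivalent to: java org.apache.hadoop.record.RecordBench int binary 100000
    org.apache.hadoop.record.RecordBench.main(
        new String[] { "int", "binary", "100000" });
  }
}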
124
src/test/org/apache/hadoop/record/TestBuffer.java
Normal file
@@ -0,0 +1,124 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.record;

import junit.framework.*;

/**
 * A Unit test for Record I/O Buffer class
 */
public class TestBuffer extends TestCase {

  public TestBuffer(String testName) {
    super(testName);
  }

  /**
   * Test of set method, of class org.apache.hadoop.record.Buffer.
   */
  public void testSet() {
    final byte[] bytes = new byte[10];
    final Buffer instance = new Buffer();

    instance.set(bytes);

    assertEquals("set failed", bytes, instance.get());
  }

  /**
   * Test of copy method, of class org.apache.hadoop.record.Buffer.
   */
  public void testCopy() {
    final byte[] bytes = new byte[10];
    final int offset = 6;
    final int length = 3;
    for (int idx = 0; idx < 10; idx++) {
      bytes[idx] = (byte) idx;
    }
    final Buffer instance = new Buffer();

    instance.copy(bytes, offset, length);

    assertEquals("copy failed", 3, instance.getCapacity());
    assertEquals("copy failed", 3, instance.get().length);
    for (int idx = 0; idx < 3; idx++) {
      assertEquals("Buffer content corrupted", idx+6, instance.get()[idx]);
    }
  }

  /**
   * Test of getCount method, of class org.apache.hadoop.record.Buffer.
   */
  public void testGetCount() {
    final Buffer instance = new Buffer();

    final int expResult = 0;
    final int result = instance.getCount();
    assertEquals("getCount failed", expResult, result);
  }

  /**
   * Test of getCapacity method, of class org.apache.hadoop.record.Buffer.
   */
  public void testGetCapacity() {
    final Buffer instance = new Buffer();

    final int expResult = 0;
    final int result = instance.getCapacity();
    assertEquals("getCapacity failed", expResult, result);

    instance.setCapacity(100);
    assertEquals("setCapacity failed", 100, instance.getCapacity());
  }

  /**
   * Test of truncate method, of class org.apache.hadoop.record.Buffer.
   */
  public void testTruncate() {
    final Buffer instance = new Buffer();
    instance.setCapacity(100);
    assertEquals("setCapacity failed", 100, instance.getCapacity());

    instance.truncate();
    assertEquals("truncate failed", 0, instance.getCapacity());
  }

  /**
   * Test of append method, of class org.apache.hadoop.record.Buffer.
   */
  public void testAppend() {
    final byte[] bytes = new byte[100];
    final int offset = 0;
    final int length = 100;
    for (int idx = 0; idx < 100; idx++) {
      bytes[idx] = (byte) (100-idx);
    }

    final Buffer instance = new Buffer();

    instance.append(bytes, offset, length);

    assertEquals("Buffer size mismatch", 100, instance.getCount());

    for (int idx = 0; idx < 100; idx++) {
      assertEquals("Buffer contents corrupted", 100-idx, instance.get()[idx]);
    }

  }
}
Some files were not shown because too many files have changed in this diff