HDFS-16285. Make HDFS ownership tools cross platform (#3588)
Parent: 35556ea043
Commit: 6dddbd42ed
@@ -23,6 +23,9 @@ add_executable(hdfs_tool_tests
   hdfs-delete-snapshot-mock.cc
   hdfs-create-snapshot-mock.cc
   hdfs-cat-mock.cc
+  hdfs-chown-mock.cc
+  hdfs-chmod-mock.cc
+  hdfs-chgrp-mock.cc
   hdfs-tool-test-fixtures.cc
   hdfs-tool-tests.cc
   hdfs-df-mock.cc
@@ -36,6 +39,9 @@ target_include_directories(hdfs_tool_tests PRIVATE
   ../../tools/hdfs-delete-snapshot
   ../../tools/hdfs-create-snapshot
   ../../tools/hdfs-rename-snapshot
+  ../../tools/hdfs-chown
+  ../../tools/hdfs-chgrp
+  ../../tools/hdfs-chmod
   ../../tools/hdfs-cat)

 target_link_libraries(hdfs_tool_tests PRIVATE
   gmock_main
@@ -45,5 +51,8 @@ target_link_libraries(hdfs_tool_tests PRIVATE
   hdfs_deleteSnapshot_lib
   hdfs_createSnapshot_lib
   hdfs_renameSnapshot_lib
+  hdfs_chown_lib
+  hdfs_chgrp_lib
+  hdfs_chmod_lib
   hdfs_cat_lib)

 add_test(hdfs_tool_tests hdfs_tool_tests)
@@ -0,0 +1,67 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "hdfs-chgrp-mock.h"
#include "hdfs-chgrp.h"
#include "hdfs-tool-tests.h"

namespace hdfs::tools::test {
ChgrpMock::~ChgrpMock() = default;

void ChgrpMock::SetExpectations(
    std::function<std::unique_ptr<ChgrpMock>()> test_case,
    const std::vector<std::string> &args) const {
  // Get the pointer to the function that defines the test case
  const auto test_case_func =
      test_case.target<std::unique_ptr<ChgrpMock> (*)()>();
  ASSERT_NE(test_case_func, nullptr);

  // Set the expected method calls and their corresponding arguments for each
  // test case
  if (*test_case_func == &CallHelp<ChgrpMock>) {
    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassOwnerAndAPath<ChgrpMock>) {
    const auto arg1 = args[0];
    const auto arg2 = args[1];

    EXPECT_CALL(*this, HandlePath(arg1, false, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }

  if (*test_case_func == &PassRecursiveOwnerAndAPath<ChgrpMock>) {
    const auto arg1 = args[1];
    const auto arg2 = args[2];

    EXPECT_CALL(*this, HandlePath(arg1, true, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }
}
} // namespace hdfs::tools::test
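The dispatch above relies on std::function::target, which only returns a non-null pointer when the stored callable is exactly a plain function pointer of the queried type; that is why each test-case factory (CallHelp, PassOwnerAndAPath, ...) can be matched by address. A minimal standalone sketch of just that mechanism (the names foo and bar are illustrative, not from the patch):

#include <cassert>
#include <functional>

int foo() { return 1; }
int bar() { return 2; }

int main() {
  const std::function<int()> f = foo;
  // target<T>() yields a pointer to the stored callable only if its stored
  // type is exactly T; here T is the plain function-pointer type int (*)().
  const auto *stored = f.target<int (*)()>();
  assert(stored != nullptr);
  assert(*stored == &foo); // dispatch by comparing function addresses
  assert(*stored != &bar);
  return 0;
}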
@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_CHGRP_MOCK
#define LIBHDFSPP_TOOLS_HDFS_CHGRP_MOCK

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include <gmock/gmock.h>

#include "hdfs-chgrp.h"

namespace hdfs::tools::test {
/**
 * {@class ChgrpMock} is a {@class Chgrp} that mocks the
 * HandleHelp and HandlePath methods for testing their functionality.
 */
class ChgrpMock : public hdfs::tools::Chgrp {
public:
  /**
   * {@inheritdoc}
   */
  ChgrpMock(const int argc, char **argv) : Chgrp(argc, argv) {}

  // Abiding by the Rule of 5
  ChgrpMock(const ChgrpMock &) = delete;
  ChgrpMock(ChgrpMock &&) = delete;
  ChgrpMock &operator=(const ChgrpMock &) = delete;
  ChgrpMock &operator=(ChgrpMock &&) = delete;
  ~ChgrpMock() override;

  /**
   * Defines the methods and the corresponding arguments that are expected
   * to be called on this instance of {@link HdfsTool} for the given test case.
   *
   * @param test_case An {@link std::function} object that points to the
   * function defining the test case
   * @param args The arguments that are passed to this test case
   */
  void SetExpectations(std::function<std::unique_ptr<ChgrpMock>()> test_case,
                       const std::vector<std::string> &args = {}) const;

  MOCK_METHOD(bool, HandleHelp, (), (const, override));

  MOCK_METHOD(bool, HandlePath,
              (const std::string &, bool, const std::string &),
              (const, override));
};
} // namespace hdfs::tools::test

#endif
@@ -0,0 +1,67 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "hdfs-chmod-mock.h"
#include "hdfs-chmod.h"
#include "hdfs-tool-tests.h"

namespace hdfs::tools::test {
ChmodMock::~ChmodMock() = default;

void ChmodMock::SetExpectations(
    std::function<std::unique_ptr<ChmodMock>()> test_case,
    const std::vector<std::string> &args) const {
  // Get the pointer to the function that defines the test case
  const auto test_case_func =
      test_case.target<std::unique_ptr<ChmodMock> (*)()>();
  ASSERT_NE(test_case_func, nullptr);

  // Set the expected method calls and their corresponding arguments for each
  // test case
  if (*test_case_func == &CallHelp<ChmodMock>) {
    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassPermissionsAndAPath<ChmodMock>) {
    const auto arg1 = args[0];
    const auto arg2 = args[1];

    EXPECT_CALL(*this, HandlePath(arg1, false, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }

  if (*test_case_func == &PassRecursivePermissionsAndAPath<ChmodMock>) {
    const auto arg1 = args[1];
    const auto arg2 = args[2];

    EXPECT_CALL(*this, HandlePath(arg1, true, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }
}
} // namespace hdfs::tools::test
@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_CHMOD_MOCK
#define LIBHDFSPP_TOOLS_HDFS_CHMOD_MOCK

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include <gmock/gmock.h>

#include "hdfs-chmod.h"

namespace hdfs::tools::test {
/**
 * {@class ChmodMock} is a {@class Chmod} that mocks the
 * HandleHelp and HandlePath methods for testing their functionality.
 */
class ChmodMock : public hdfs::tools::Chmod {
public:
  /**
   * {@inheritdoc}
   */
  ChmodMock(const int argc, char **argv) : Chmod(argc, argv) {}

  // Abiding by the Rule of 5
  ChmodMock(const ChmodMock &) = delete;
  ChmodMock(ChmodMock &&) = delete;
  ChmodMock &operator=(const ChmodMock &) = delete;
  ChmodMock &operator=(ChmodMock &&) = delete;
  ~ChmodMock() override;

  /**
   * Defines the methods and the corresponding arguments that are expected
   * to be called on this instance of {@link HdfsTool} for the given test case.
   *
   * @param test_case An {@link std::function} object that points to the
   * function defining the test case
   * @param args The arguments that are passed to this test case
   */
  void SetExpectations(std::function<std::unique_ptr<ChmodMock>()> test_case,
                       const std::vector<std::string> &args = {}) const;

  MOCK_METHOD(bool, HandleHelp, (), (const, override));

  MOCK_METHOD(bool, HandlePath,
              (const std::string &, bool, const std::string &),
              (const, override));
};
} // namespace hdfs::tools::test

#endif
@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "hdfs-chown-mock.h"
#include "hdfs-chown.h"
#include "hdfs-tool-tests.h"

namespace hdfs::tools::test {
ChownMock::~ChownMock() = default;

void ChownMock::SetExpectations(
    std::function<std::unique_ptr<ChownMock>()> test_case,
    const std::vector<std::string> &args) const {
  // Get the pointer to the function that defines the test case
  const auto test_case_func =
      test_case.target<std::unique_ptr<ChownMock> (*)()>();
  ASSERT_NE(test_case_func, nullptr);

  // Set the expected method calls and their corresponding arguments for each
  // test case
  if (*test_case_func == &CallHelp<ChownMock>) {
    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
    return;
  }

  if (*test_case_func == &PassOwnerAndAPath<ChownMock>) {
    const auto arg1 = args[0];
    const auto arg2 = args[1];
    const Ownership ownership(arg1);

    EXPECT_CALL(*this, HandlePath(ownership, false, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }

  if (*test_case_func == &PassRecursiveOwnerAndAPath<ChownMock>) {
    const auto arg1 = args[1];
    const auto arg2 = args[2];
    const Ownership ownership(arg1);

    EXPECT_CALL(*this, HandlePath(ownership, true, arg2))
        .Times(1)
        .WillOnce(testing::Return(true));
  }
}
} // namespace hdfs::tools::test
@@ -0,0 +1,68 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_CHOWN_MOCK
#define LIBHDFSPP_TOOLS_HDFS_CHOWN_MOCK

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include <gmock/gmock.h>

#include "hdfs-chown.h"

namespace hdfs::tools::test {
/**
 * {@class ChownMock} is a {@class Chown} that mocks the
 * HandleHelp and HandlePath methods for testing their functionality.
 */
class ChownMock : public hdfs::tools::Chown {
public:
  /**
   * {@inheritdoc}
   */
  ChownMock(const int argc, char **argv) : Chown(argc, argv) {}

  // Abiding by the Rule of 5
  ChownMock(const ChownMock &) = delete;
  ChownMock(ChownMock &&) = delete;
  ChownMock &operator=(const ChownMock &) = delete;
  ChownMock &operator=(ChownMock &&) = delete;
  ~ChownMock() override;

  /**
   * Defines the methods and the corresponding arguments that are expected
   * to be called on this instance of {@link HdfsTool} for the given test case.
   *
   * @param test_case An {@link std::function} object that points to the
   * function defining the test case
   * @param args The arguments that are passed to this test case
   */
  void SetExpectations(std::function<std::unique_ptr<ChownMock>()> test_case,
                       const std::vector<std::string> &args = {}) const;

  MOCK_METHOD(bool, HandleHelp, (), (const, override));

  MOCK_METHOD(bool, HandlePath, (const Ownership &, bool, const std::string &),
              (const, override));
};
} // namespace hdfs::tools::test

#endif
@@ -21,6 +21,9 @@
 #include "hdfs-allow-snapshot-mock.h"
 #include "hdfs-cat-mock.h"
+#include "hdfs-chgrp-mock.h"
+#include "hdfs-chmod-mock.h"
+#include "hdfs-chown-mock.h"
 #include "hdfs-create-snapshot-mock.h"
 #include "hdfs-delete-snapshot-mock.h"
 #include "hdfs-df-mock.h"
@@ -69,6 +72,25 @@ INSTANTIATE_TEST_SUITE_P(
     testing::Values(CallHelp<hdfs::tools::test::DeleteSnapshotMock>,
                     Pass2Paths<hdfs::tools::test::DeleteSnapshotMock>));

+INSTANTIATE_TEST_SUITE_P(
+    HdfsChown, HdfsToolBasicTest,
+    testing::Values(CallHelp<hdfs::tools::test::ChownMock>,
+                    PassOwnerAndAPath<hdfs::tools::test::ChownMock>,
+                    PassRecursiveOwnerAndAPath<hdfs::tools::test::ChownMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChmod, HdfsToolBasicTest,
+    testing::Values(
+        CallHelp<hdfs::tools::test::ChmodMock>,
+        PassPermissionsAndAPath<hdfs::tools::test::ChmodMock>,
+        PassRecursivePermissionsAndAPath<hdfs::tools::test::ChmodMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChgrp, HdfsToolBasicTest,
+    testing::Values(CallHelp<hdfs::tools::test::ChgrpMock>,
+                    PassOwnerAndAPath<hdfs::tools::test::ChgrpMock>,
+                    PassRecursiveOwnerAndAPath<hdfs::tools::test::ChgrpMock>));
+
 // Negative tests
 INSTANTIATE_TEST_SUITE_P(
     HdfsAllowSnapshot, HdfsToolNegativeTestThrows,
@@ -99,3 +121,27 @@ INSTANTIATE_TEST_SUITE_P(
 INSTANTIATE_TEST_SUITE_P(
     HdfsDeleteSnapshot, HdfsToolNegativeTestNoThrow,
     testing::Values(PassAPath<hdfs::tools::test::DeleteSnapshotMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChown, HdfsToolNegativeTestNoThrow,
+    testing::Values(PassAPath<hdfs::tools::test::ChownMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChown, HdfsToolNegativeTestThrows,
+    testing::Values(PassNOptAndAPath<hdfs::tools::test::ChownMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChmod, HdfsToolNegativeTestNoThrow,
+    testing::Values(PassAPath<hdfs::tools::test::ChmodMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChmod, HdfsToolNegativeTestThrows,
+    testing::Values(PassNOptAndAPath<hdfs::tools::test::ChmodMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChgrp, HdfsToolNegativeTestNoThrow,
+    testing::Values(PassAPath<hdfs::tools::test::ChgrpMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsChgrp, HdfsToolNegativeTestThrows,
+    testing::Values(PassNOptAndAPath<hdfs::tools::test::ChgrpMock>));
@@ -97,4 +97,59 @@ template <class T> std::unique_ptr<T> PassNOptAndAPath() {
   return hdfs_tool;
 }
+
+template <class T> std::unique_ptr<T> PassOwnerAndAPath() {
+  constexpr auto argc = 3;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("new_owner:new_group");
+  static std::string arg2("g/h/i");
+
+  static char *argv[] = {exe.data(), arg1.data(), arg2.data()};
+
+  auto hdfs_tool = std::make_unique<T>(argc, argv);
+  hdfs_tool->SetExpectations(PassOwnerAndAPath<T>, {arg1, arg2});
+  return hdfs_tool;
+}
+
+template <class T> std::unique_ptr<T> PassRecursiveOwnerAndAPath() {
+  constexpr auto argc = 4;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("-R");
+  static std::string arg2("new_owner:new_group");
+  static std::string arg3("g/h/i");
+
+  static char *argv[] = {exe.data(), arg1.data(), arg2.data(), arg3.data()};
+
+  auto hdfs_tool = std::make_unique<T>(argc, argv);
+  hdfs_tool->SetExpectations(PassRecursiveOwnerAndAPath<T>, {arg1, arg2, arg3});
+  return hdfs_tool;
+}
+
+template <class T> std::unique_ptr<T> PassPermissionsAndAPath() {
+  constexpr auto argc = 3;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("757");
+  static std::string arg2("g/h/i");
+
+  static char *argv[] = {exe.data(), arg1.data(), arg2.data()};
+
+  auto hdfs_tool = std::make_unique<T>(argc, argv);
+  hdfs_tool->SetExpectations(PassPermissionsAndAPath<T>, {arg1, arg2});
+  return hdfs_tool;
+}
+
+template <class T> std::unique_ptr<T> PassRecursivePermissionsAndAPath() {
+  constexpr auto argc = 4;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("-R");
+  static std::string arg2("757");
+  static std::string arg3("g/h/i");
+
+  static char *argv[] = {exe.data(), arg1.data(), arg2.data(), arg3.data()};
+
+  auto hdfs_tool = std::make_unique<T>(argc, argv);
+  hdfs_tool->SetExpectations(PassRecursivePermissionsAndAPath<T>,
+                             {arg1, arg2, arg3});
+  return hdfs_tool;
+}
+
 #endif
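These factory functions are what the INSTANTIATE_TEST_SUITE_P calls above feed to the parameterized fixtures. The fixture bodies themselves are not part of this diff; the sketch below is an assumption of how such a basic test could consume a factory (only the names HdfsToolBasicTest, HdfsTool and the factory functions come from the patch, the fixture definition and TEST_P body are hypothetical):

#include <functional>
#include <memory>

#include <gtest/gtest.h>

#include "hdfs-tool.h"

// Hypothetical stand-in for the fixture declared in hdfs-tool-test-fixtures.h.
class HdfsToolBasicTest
    : public testing::TestWithParam<
          std::function<std::unique_ptr<hdfs::tools::HdfsTool>()>> {};

TEST_P(HdfsToolBasicTest, RunsToolAgainstMockExpectations) {
  // GetParam() is a factory such as CallHelp<ChgrpMock>; invoking it builds
  // the mock and registers its EXPECT_CALLs via SetExpectations().
  const auto tool = GetParam()();
  // Do() parses argv and dispatches to HandleHelp()/HandlePath(); gmock
  // verifies the registered expectations when the mock is destroyed.
  EXPECT_TRUE(tool->Do());
}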
@@ -28,19 +28,18 @@ link_directories( ${LIBHDFSPP_DIR}/lib )
 add_library(tools_common_obj OBJECT tools_common.cc)
 add_library(tools_common $<TARGET_OBJECTS:tools_common_obj>)

+add_subdirectory(internal)
+
 add_library(hdfs_tool_obj OBJECT hdfs-tool.cc)
 target_include_directories(hdfs_tool_obj PRIVATE ../tools)

 add_subdirectory(hdfs-cat)

-add_executable(hdfs_chgrp hdfs_chgrp.cc)
-target_link_libraries(hdfs_chgrp tools_common hdfspp_static)
+add_subdirectory(hdfs-chgrp)

-add_executable(hdfs_chown hdfs_chown.cc)
-target_link_libraries(hdfs_chown tools_common hdfspp_static)
+add_subdirectory(hdfs-chown)

-add_executable(hdfs_chmod hdfs_chmod.cc)
-target_link_libraries(hdfs_chmod tools_common hdfspp_static)
+add_subdirectory(hdfs-chmod)

 add_executable(hdfs_find hdfs_find.cc)
 target_link_libraries(hdfs_find tools_common hdfspp_static)
@@ -0,0 +1,27 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

add_library(hdfs_chgrp_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> $<TARGET_OBJECTS:hdfs_ownership_obj> hdfs-chgrp.cc)
target_include_directories(hdfs_chgrp_lib PRIVATE ../../tools hdfs-chgrp ${Boost_INCLUDE_DIRS})
target_link_libraries(hdfs_chgrp_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)

add_executable(hdfs_chgrp main.cc)
target_include_directories(hdfs_chgrp PRIVATE ../../tools)
target_link_libraries(hdfs_chgrp PRIVATE hdfs_chgrp_lib)

install(TARGETS hdfs_chgrp RUNTIME DESTINATION bin)
@@ -0,0 +1,220 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>

#include "hdfs-chgrp.h"
#include "internal/hdfs-ownership.h"
#include "tools_common.h"

namespace hdfs::tools {
Chgrp::Chgrp(const int argc, char **argv) : HdfsTool(argc, argv) {}

bool Chgrp::Initialize() {
  auto add_options = opt_desc_.add_options();
  add_options("help,h", "Change the group association of each FILE to GROUP.");
  add_options("file", po::value<std::string>(),
              "The path to the file whose group needs to be modified");
  add_options("recursive,R", "Operate on files and directories recursively");
  add_options(
      "group", po::value<std::string>(),
      "The group to which the file's group association needs to be changed");

  // An exception is thrown if these arguments are missing or if the arguments'
  // count doesn't tally.
  pos_opt_desc_.add("group", 1);
  pos_opt_desc_.add("file", 1);

  po::store(po::command_line_parser(argc_, argv_)
                .options(opt_desc_)
                .positional(pos_opt_desc_)
                .run(),
            opt_val_);
  po::notify(opt_val_);
  return true;
}

bool Chgrp::ValidateConstraints() const {
  // Only "help" is allowed as single argument
  if (argc_ == 2) {
    return opt_val_.count("help");
  }

  // Rest of the cases must contain more than 2 arguments on the command line
  return argc_ > 2;
}

std::string Chgrp::GetDescription() const {
  std::stringstream desc;
  desc << "Usage: hdfs_chgrp [OPTION] GROUP FILE" << std::endl
       << std::endl
       << "Change the group association of each FILE to GROUP." << std::endl
       << "The user must be the owner of files. Additional information is in "
          "the Permissions Guide:"
       << std::endl
       << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/"
          "hadoop-hdfs/HdfsPermissionsGuide.html"
       << std::endl
       << std::endl
       << " -R operate on files and directories recursively" << std::endl
       << " -h display this help and exit" << std::endl
       << std::endl
       << "Examples:" << std::endl
       << "hdfs_chgrp -R new_group hdfs://localhost.localdomain:8020/dir/file"
       << std::endl
       << "hdfs_chgrp new_group /dir/file" << std::endl;
  return desc.str();
}

bool Chgrp::Do() {
  if (!Initialize()) {
    std::cerr << "Unable to initialize HDFS chgrp tool" << std::endl;
    return false;
  }

  if (!ValidateConstraints()) {
    std::cout << GetDescription();
    return false;
  }

  if (opt_val_.count("help") > 0) {
    return HandleHelp();
  }

  if (opt_val_.count("file") > 0 && opt_val_.count("group") > 0) {
    const auto file = opt_val_["file"].as<std::string>();
    const auto recursive = opt_val_.count("recursive") > 0;
    const auto group = opt_val_["group"].as<std::string>();
    return HandlePath(group, recursive, file);
  }

  return true;
}

bool Chgrp::HandleHelp() const {
  std::cout << GetDescription();
  return true;
}

bool Chgrp::HandlePath(const std::string &group, const bool recursive,
                       const std::string &file) const {
  // Building a URI object from the given file
  auto uri = hdfs::parse_path_or_exit(file);

  const auto fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect to the file system." << std::endl;
    return false;
  }

  // Wrap async FileSystem::SetOwner with promise to make it a blocking call
  const auto promise = std::make_shared<std::promise<hdfs::Status>>();
  auto future(promise->get_future());
  auto handler = [promise](const hdfs::Status &s) { promise->set_value(s); };

  if (!recursive) {
    fs->SetOwner(uri.get_path(), "", group, handler);
  } else {
    /*
     * Allocating shared state, which includes: username and groupname to be
     * set, handler to be called, request counter, and a boolean to keep track
     * if find is done
     */
    const auto state =
        std::make_shared<OwnerState>("", group, handler, 0, false);

    /*
     * Keep requesting more from Find until we process the entire listing. Call
     * handler when Find is done and request counter is 0. Find guarantees that
     * the handler will only be called once at a time so we do not need locking
     * in handler_find.
     */
    auto handler_find = [fs,
                         state](const hdfs::Status &status_find,
                                const std::vector<hdfs::StatInfo> &stat_infos,
                                const bool has_more_results) -> bool {
      /*
       * For each result returned by Find we call async SetOwner with the
       * handler below. SetOwner DOES NOT guarantee that the handler will only
       * be called once at a time, so we DO need locking in handler_set_owner.
       */
      auto handler_set_owner = [state](const hdfs::Status &status_set_owner) {
        std::lock_guard guard(state->lock);

        if (!status_set_owner.ok() && state->status.ok()) {
          // We make sure we set state->status only on the first error.
          state->status = status_set_owner;
        }

        // Decrement the counter once since we are done with this async call
        state->request_counter--;
        if (state->request_counter == 0 && state->find_is_done) {
          state->handler(state->status); // exit
        }
      };

      if (!stat_infos.empty() && state->status.ok()) {
        for (const auto &s : stat_infos) {
          // Launch an asynchronous call to SetOwner for every returned result
          state->request_counter++;
          fs->SetOwner(s.full_path, state->user, state->group,
                       handler_set_owner);
        }
      }

      /*
       * Lock this section because handler_set_owner might be accessing the
       * same shared variables simultaneously.
       */
      std::lock_guard guard(state->lock);
      if (!status_find.ok() && state->status.ok()) {
        // We make sure we set state->status only on the first error.
        state->status = status_find;
      }

      if (!has_more_results) {
        state->find_is_done = true;
        if (state->request_counter == 0) {
          state->handler(state->status); // exit
        }
        return false;
      }
      return true;
    };

    // Asynchronous call to Find
    fs->Find(uri.get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(),
             handler_find);
  }

  // Block until promise is set
  const auto status = future.get();
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    return false;
  }
  return true;
}
} // namespace hdfs::tools
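The non-recursive branch above is an instance of a general pattern: publish a std::promise into the completion handler, then block on the matching future so the asynchronous client call behaves like a synchronous one. A minimal self-contained sketch of just that pattern, with a stand-in async function instead of the real libhdfs++ FileSystem API (Status and AsyncSetGroup below are simplified placeholders, not hdfs::Status or FileSystem::SetOwner):

#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <string>
#include <thread>

// Simplified stand-ins for hdfs::Status and an asynchronous client call.
struct Status {
  bool ok = true;
  std::string message;
};

void AsyncSetGroup(const std::string &path, const std::string &group,
                   const std::function<void(const Status &)> &handler) {
  // Simulate completion on another thread, as the real client library would.
  std::thread([=] { handler(Status{true, path + " -> " + group}); }).detach();
}

int main() {
  // Wrap the async call with a promise to make it a blocking call, in the
  // same way Chgrp::HandlePath wraps FileSystem::SetOwner.
  const auto promise = std::make_shared<std::promise<Status>>();
  auto future = promise->get_future();
  auto handler = [promise](const Status &s) { promise->set_value(s); };

  AsyncSetGroup("/dir/file", "new_group", handler);

  const auto status = future.get(); // block until the handler fires
  std::cout << (status.ok ? "ok: " : "error: ") << status.message << std::endl;
  return 0;
}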
@@ -0,0 +1,96 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_CHGRP
#define LIBHDFSPP_TOOLS_HDFS_CHGRP

#include <string>

#include <boost/program_options.hpp>

#include "hdfs-tool.h"

namespace hdfs::tools {
/**
 * {@class Chgrp} is an {@class HdfsTool} that changes the group association of
 * each file to the given group.
 */
class Chgrp : public HdfsTool {
public:
  /**
   * {@inheritdoc}
   */
  Chgrp(int argc, char **argv);

  // Abiding by the Rule of 5
  Chgrp(const Chgrp &) = default;
  Chgrp(Chgrp &&) = default;
  Chgrp &operator=(const Chgrp &) = delete;
  Chgrp &operator=(Chgrp &&) = delete;
  ~Chgrp() override = default;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] std::string GetDescription() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Do() override;

protected:
  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Initialize() override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool ValidateConstraints() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool HandleHelp() const override;

  /**
   * Handle the path to the file argument that's passed to this tool.
   *
   * @param group The name of the group to change to.
   * @param recursive Whether this operation needs to be performed recursively
   * on all the files in the given path's sub-directory.
   * @param file The path to the file whose group needs to be changed.
   *
   * @return A boolean indicating the result of this operation.
   */
  [[nodiscard]] virtual bool HandlePath(const std::string &group,
                                        bool recursive,
                                        const std::string &file) const;

private:
  /**
   * A boost data-structure containing the description of positional arguments
   * passed to the command-line.
   */
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools

#endif
@@ -0,0 +1,52 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <exception>
#include <iostream>

#include <google/protobuf/stubs/common.h>

#include "hdfs-chgrp.h"

int main(int argc, char *argv[]) {
  const auto result = std::atexit([]() -> void {
    // Clean up static data on exit and prevent valgrind memory leaks
    google::protobuf::ShutdownProtobufLibrary();
  });
  if (result != 0) {
    std::cerr << "Error: Unable to schedule clean-up tasks for HDFS chgrp "
                 "tool, exiting"
              << std::endl;
    std::exit(EXIT_FAILURE);
  }

  hdfs::tools::Chgrp chgrp(argc, argv);
  auto success = false;

  try {
    success = chgrp.Do();
  } catch (const std::exception &e) {
    std::cerr << "Error: " << e.what() << std::endl;
  }

  if (!success) {
    std::exit(EXIT_FAILURE);
  }
  return 0;
}
@@ -0,0 +1,28 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

add_library(hdfs_chmod_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> $<TARGET_OBJECTS:hdfs_ownership_obj> hdfs-chmod.cc)
target_include_directories(hdfs_chmod_lib PRIVATE ../../tools hdfs-chmod ${Boost_INCLUDE_DIRS})
target_link_libraries(hdfs_chmod_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)

add_executable(hdfs_chmod main.cc)
target_include_directories(hdfs_chmod PRIVATE ../../tools)
target_link_libraries(hdfs_chmod PRIVATE hdfs_chmod_lib)

install(TARGETS hdfs_chmod RUNTIME DESTINATION bin)
@@ -0,0 +1,232 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>

#include "hdfs-chmod.h"
#include "tools_common.h"

namespace hdfs::tools {
Chmod::Chmod(const int argc, char **argv) : HdfsTool(argc, argv) {}

bool Chmod::Initialize() {
  auto add_options = opt_desc_.add_options();
  add_options("help,h", "Change the permissions of each FILE to MODE.");
  add_options("file", po::value<std::string>(),
              "The path to the file whose permissions needs to be modified");
  add_options("recursive,R", "Operate on files and directories recursively");
  add_options("permissions", po::value<std::string>(),
              "Octal representation of the permission bits");

  // An exception is thrown if these arguments are missing or if the arguments'
  // count doesn't tally.
  pos_opt_desc_.add("permissions", 1);
  pos_opt_desc_.add("file", 1);

  po::store(po::command_line_parser(argc_, argv_)
                .options(opt_desc_)
                .positional(pos_opt_desc_)
                .run(),
            opt_val_);
  po::notify(opt_val_);
  return true;
}

bool Chmod::ValidateConstraints() const {
  // Only "help" is allowed as single argument
  if (argc_ == 2) {
    return opt_val_.count("help");
  }

  // Rest of the cases must contain more than 2 arguments on the command line
  return argc_ > 2;
}

std::string Chmod::GetDescription() const {
  std::stringstream desc;
  desc << "Usage: hdfs_chmod [OPTION] <MODE[,MODE]... | OCTALMODE> FILE"
       << std::endl
       << std::endl
       << "Change the permissions of each FILE to MODE." << std::endl
       << "The user must be the owner of the file, or else a super-user."
       << std::endl
       << "Additional information is in the Permissions Guide:" << std::endl
       << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/"
          "hadoop-hdfs/HdfsPermissionsGuide.html"
       << std::endl
       << std::endl
       << " -R operate on files and directories recursively" << std::endl
       << " -h display this help and exit" << std::endl
       << std::endl
       << "Examples:" << std::endl
       << "hdfs_chmod -R 755 hdfs://localhost.localdomain:8020/dir/file"
       << std::endl
       << "hdfs_chmod 777 /dir/file" << std::endl;
  return desc.str();
}

bool Chmod::Do() {
  if (!Initialize()) {
    std::cerr << "Unable to initialize HDFS chmod tool" << std::endl;
    return false;
  }

  if (!ValidateConstraints()) {
    std::cout << GetDescription();
    return false;
  }

  if (opt_val_.count("help") > 0) {
    return HandleHelp();
  }

  if (opt_val_.count("file") > 0 && opt_val_.count("permissions") > 0) {
    const auto file = opt_val_["file"].as<std::string>();
    const auto recursive = opt_val_.count("recursive") > 0;
    const auto permissions = opt_val_["permissions"].as<std::string>();
    return HandlePath(permissions, recursive, file);
  }

  return true;
}

bool Chmod::HandleHelp() const {
  std::cout << GetDescription();
  return true;
}

bool Chmod::HandlePath(const std::string &permissions, const bool recursive,
                       const std::string &file) const {
  // Building a URI object from the given file
  auto uri = hdfs::parse_path_or_exit(file);

  const auto fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect to the file system." << std::endl;
    return false;
  }

  /*
   * Wrap async FileSystem::SetPermission with promise to make it a blocking
   * call.
   */
  const auto promise = std::make_shared<std::promise<hdfs::Status>>();
  auto future(promise->get_future());
  auto handler = [promise](const hdfs::Status &s) { promise->set_value(s); };

  /*
   * Read the permissions value in base 8; the end pointer passed to strtol is
   * nullptr since we parse just one value.
   */
  auto perm = static_cast<uint16_t>(strtol(permissions.c_str(), nullptr, 8));
  if (!recursive) {
    fs->SetPermission(uri.get_path(), perm, handler);
  } else {
    /*
     * Allocating shared state, which includes -
     * 1. Permissions to be set
     * 2. Handler to be called
     * 3. Request counter
     * 4. A boolean to keep track if find is done
     */
    auto state = std::make_shared<PermissionState>(perm, handler, 0, false);

    /*
     * Keep requesting more from Find until we process the entire listing. Call
     * handler when Find is done and request counter is 0. Find guarantees that
     * the handler will only be called once at a time so we do not need locking
     * in handler_find.
     */
    auto handler_find = [fs,
                         state](const hdfs::Status &status_find,
                                const std::vector<hdfs::StatInfo> &stat_infos,
                                const bool has_more_results) -> bool {
      /*
       * For each result returned by Find we call async SetPermission with the
       * handler below. SetPermission DOES NOT guarantee that the handler will
       * only be called once at a time, so we DO need locking in
       * handler_set_permission.
       */
      auto handler_set_permission =
          [state](const hdfs::Status &status_set_permission) {
            std::lock_guard guard(state->lock);

            if (!status_set_permission.ok() && state->status.ok()) {
              // We make sure we set state->status only on the first error.
              state->status = status_set_permission;
            }

            // Decrement the counter once since we are done with this async call
            state->request_counter--;
            if (state->request_counter == 0 && state->find_is_done) {
              state->handler(state->status); // exit
            }
          };

      if (!stat_infos.empty() && state->status.ok()) {
        for (const auto &s : stat_infos) {
          /*
           * Launch an asynchronous call to SetPermission for every returned
           * result
           */
          state->request_counter++;
          fs->SetPermission(s.full_path, state->permissions,
                            handler_set_permission);
        }
      }

      /*
       * Lock this section because handler_set_permission might be accessing
       * the same shared variables simultaneously
       */
      std::lock_guard guard(state->lock);
      if (!status_find.ok() && state->status.ok()) {
        // We make sure we set state->status only on the first error.
        state->status = status_find;
      }
      if (!has_more_results) {
        state->find_is_done = true;
        if (state->request_counter == 0) {
          state->handler(state->status); // exit
        }
        return false;
      }
      return true;
    };

    // Asynchronous call to Find
    fs->Find(uri.get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(),
             handler_find);
  }

  // Block until promise is set
  const auto status = future.get();
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    return false;
  }
  return true;
}
} // namespace hdfs::tools
@@ -0,0 +1,133 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_CHMOD
#define LIBHDFSPP_TOOLS_HDFS_CHMOD

#include <functional>
#include <mutex>
#include <string>
#include <utility>

#include <boost/program_options.hpp>

#include "hdfs-tool.h"
#include "hdfspp/status.h"

namespace hdfs::tools {
struct PermissionState {
  PermissionState(const uint16_t permissions,
                  std::function<void(const hdfs::Status &)> handler,
                  const uint64_t request_counter, const bool find_is_done)
      : permissions(permissions), handler(std::move(handler)),
        request_counter(request_counter), find_is_done(find_is_done) {}

  const uint16_t permissions;
  const std::function<void(const hdfs::Status &)> handler;

  /**
   * The request counter is incremented once every time a SetPermission async
   * call is made
   */
  uint64_t request_counter;

  /**
   * This boolean will be set when find returns the last result
   */
  bool find_is_done{false};

  /**
   * Final status to be returned
   */
  hdfs::Status status;

  /**
   * Shared variables will need protection with a lock
   */
  std::mutex lock;
};

/**
 * {@class Chmod} is an {@class HdfsTool} that changes the permissions of a
 * file or folder.
 */
class Chmod : public HdfsTool {
public:
  /**
   * {@inheritdoc}
   */
  Chmod(int argc, char **argv);

  // Abiding by the Rule of 5
  Chmod(const Chmod &) = default;
  Chmod(Chmod &&) = default;
  Chmod &operator=(const Chmod &) = delete;
  Chmod &operator=(Chmod &&) = delete;
  ~Chmod() override = default;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] std::string GetDescription() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Do() override;

protected:
  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Initialize() override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool ValidateConstraints() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool HandleHelp() const override;

  /**
   * Handle the path to the file argument that's passed to this tool.
   *
   * @param permissions An octal representation of the new permissions to be
   * assigned.
   * @param recursive Whether this operation needs to be performed recursively
   * on all the files in the given path's sub-directory.
   * @param file The path to the file whose permissions need to be changed.
   *
   * @return A boolean indicating the result of this operation.
   */
  [[nodiscard]] virtual bool HandlePath(const std::string &permissions,
                                        bool recursive,
                                        const std::string &file) const;

private:
  /**
   * A boost data-structure containing the description of positional arguments
   * passed to the command-line.
   */
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools

#endif
@@ -0,0 +1,52 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <exception>
#include <iostream>

#include <google/protobuf/stubs/common.h>

#include "hdfs-chmod.h"

int main(int argc, char *argv[]) {
  const auto result = std::atexit([]() -> void {
    // Clean up static data on exit and prevent valgrind memory leaks
    google::protobuf::ShutdownProtobufLibrary();
  });
  if (result != 0) {
    std::cerr << "Error: Unable to schedule clean-up tasks for HDFS chmod "
                 "tool, exiting"
              << std::endl;
    std::exit(EXIT_FAILURE);
  }

  hdfs::tools::Chmod chmod(argc, argv);
  auto success = false;

  try {
    success = chmod.Do();
  } catch (const std::exception &e) {
    std::cerr << "Error: " << e.what() << std::endl;
  }

  if (!success) {
    std::exit(EXIT_FAILURE);
  }
  return 0;
}
@@ -0,0 +1,27 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

add_library(hdfs_chown_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> $<TARGET_OBJECTS:hdfs_ownership_obj> hdfs-chown.cc)
target_include_directories(hdfs_chown_lib PRIVATE ../../tools ${Boost_INCLUDE_DIRS})
target_link_libraries(hdfs_chown_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)

add_executable(hdfs_chown main.cc)
target_include_directories(hdfs_chown PRIVATE ../../tools)
target_link_libraries(hdfs_chown PRIVATE hdfs_chown_lib)

install(TARGETS hdfs_chown RUNTIME DESTINATION bin)
@@ -0,0 +1,229 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>

#include "hdfs-chown.h"
#include "tools_common.h"

namespace hdfs::tools {
Chown::Chown(const int argc, char **argv) : HdfsTool(argc, argv) {}

bool Chown::Initialize() {
  auto add_options = opt_desc_.add_options();
  add_options(
      "help,h",
      "Change the owner and/or group of each FILE to OWNER and/or GROUP.");
  add_options("file", po::value<std::string>(),
              "The path to the file whose ownership needs to be modified");
  add_options("recursive,R", "Operate on files and directories recursively");
  add_options(
      "user-group", po::value<std::string>(),
      "The user:group to which the file's ownership needs to be changed to");

  // An exception is thrown if these arguments are missing or if the arguments'
  // count doesn't tally.
  pos_opt_desc_.add("user-group", 1);
  pos_opt_desc_.add("file", 1);

  po::store(po::command_line_parser(argc_, argv_)
                .options(opt_desc_)
                .positional(pos_opt_desc_)
                .run(),
            opt_val_);
  po::notify(opt_val_);
  return true;
}

bool Chown::ValidateConstraints() const {
  // Only "help" is allowed as single argument
  if (argc_ == 2) {
    return opt_val_.count("help");
  }

  // Rest of the cases must contain more than 2 arguments on the command line
  return argc_ > 2;
}

std::string Chown::GetDescription() const {
  std::stringstream desc;
  desc << "Usage: hdfs_chown [OPTION] [OWNER][:[GROUP]] FILE" << std::endl
       << std::endl
       << "Change the owner and/or group of each FILE to OWNER and/or GROUP."
       << std::endl
       << "The user must be a super-user. Additional information is in the "
          "Permissions Guide:"
       << std::endl
       << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/"
          "hadoop-hdfs/HdfsPermissionsGuide.html"
       << std::endl
       << std::endl
       << " -R operate on files and directories recursively" << std::endl
       << " -h display this help and exit" << std::endl
       << std::endl
       << "Owner is unchanged if missing. Group is unchanged if missing."
       << std::endl
       << "OWNER and GROUP may be numeric as well as symbolic." << std::endl
       << std::endl
       << "Examples:" << std::endl
       << "hdfs_chown -R new_owner:new_group "
          "hdfs://localhost.localdomain:8020/dir/file"
       << std::endl
       << "hdfs_chown new_owner /dir/file" << std::endl;
  return desc.str();
}

bool Chown::Do() {
  if (!Initialize()) {
    std::cerr << "Unable to initialize HDFS chown tool" << std::endl;
    return false;
  }

  if (!ValidateConstraints()) {
    std::cout << GetDescription();
    return false;
  }

  if (opt_val_.count("help") > 0) {
    return HandleHelp();
  }

  if (opt_val_.count("file") > 0 && opt_val_.count("user-group") > 0) {
    const auto file = opt_val_["file"].as<std::string>();
    const auto recursive = opt_val_.count("recursive") > 0;
    const Ownership ownership(opt_val_["user-group"].as<std::string>());
    return HandlePath(ownership, recursive, file);
  }

  return true;
}

bool Chown::HandleHelp() const {
  std::cout << GetDescription();
  return true;
}

bool Chown::HandlePath(const Ownership &ownership, const bool recursive,
                       const std::string &file) const {
  // Building a URI object from the given uri_path
  auto uri = hdfs::parse_path_or_exit(file);

  const auto fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    return false;
  }

  // Wrap async FileSystem::SetOwner with promise to make it a blocking call
  auto promise = std::make_shared<std::promise<hdfs::Status>>();
  auto future(promise->get_future());
  auto handler = [promise](const hdfs::Status &s) { promise->set_value(s); };

  if (!recursive) {
    fs->SetOwner(uri.get_path(), ownership.GetUser(),
                 ownership.GetGroup().value_or(""), handler);
  } else {
    /*
     * Allocating shared state, which includes: username and groupname to be
     * set, handler to be called, request counter, and a boolean to keep track
     * if find is done
     */
    auto state = std::make_shared<OwnerState>(ownership.GetUser(),
                                              ownership.GetGroup().value_or(""),
                                              handler, 0, false);

    /*
     * Keep requesting more from Find until we process the entire listing. Call
     * handler when Find is done and request counter is 0. Find guarantees that
     * the handler will only be called once at a time so we do not need locking
     * in handler_find.
     */
    auto handler_find = [fs,
                         state](const hdfs::Status &status_find,
                                const std::vector<hdfs::StatInfo> &stat_infos,
                                const bool has_more_results) -> bool {
      /*
       * For each result returned by Find we call async SetOwner with the
       * handler below. SetOwner DOES NOT guarantee that the handler will only
       * be called once at a time, so we DO need locking in handler_set_owner.
       */
      auto handler_set_owner = [state](const hdfs::Status &status_set_owner) {
        std::lock_guard guard(state->lock);

        // Decrement the counter once since we are done with this async call
        if (!status_set_owner.ok() && state->status.ok()) {
          // We make sure we set state->status only on the first error.
          state->status = status_set_owner;
        }

        state->request_counter--;
        if (state->request_counter == 0 && state->find_is_done) {
          state->handler(state->status); // exit
        }
      };

      if (!stat_infos.empty() && state->status.ok()) {
        for (const auto &s : stat_infos) {
          // Launch an asynchronous call to SetOwner for every returned result
          state->request_counter++;
          fs->SetOwner(s.full_path, state->user, state->group,
                       handler_set_owner);
        }
      }

      /*
       * Lock this section because handler_set_owner might be accessing the
       * same shared variables simultaneously.
       */
      std::lock_guard guard(state->lock);
      if (!status_find.ok() && state->status.ok()) {
        // We make sure we set state->status only on the first error.
        state->status = status_find;
      }

      if (!has_more_results) {
        state->find_is_done = true;
        if (state->request_counter == 0) {
          state->handler(state->status); // exit
        }
        return false;
      }
      return true;
    };

    // Asynchronous call to Find
    fs->Find(uri.get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(),
             handler_find);
  }

  // Block until promise is set
  const auto status = future.get();
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    return false;
  }
  return true;
}
} // namespace hdfs::tools
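Chown::HandlePath above turns the asynchronous SetOwner and Find callbacks into a blocking call by routing their completion status through a promise/future pair. As an illustration only (not part of this patch), here is a minimal standalone sketch of that wrapping pattern, using a hypothetical async_op in place of the libhdfs++ calls:

#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>

// Hypothetical callback-style API standing in for the libhdfs++ async calls:
// it runs the work on another thread and invokes on_done with a status code.
std::thread async_op(std::function<void(int)> on_done) {
  return std::thread([cb = std::move(on_done)] { cb(0); });
}

int main() {
  // Wrap the callback in a promise so the caller can block on the result,
  // mirroring how HandlePath waits for SetOwner/Find to complete.
  auto promise = std::make_shared<std::promise<int>>();
  auto future = promise->get_future();
  auto worker =
      async_op([promise](const int status) { promise->set_value(status); });

  const auto status = future.get(); // block until the handler fires
  worker.join();
  std::cout << "async_op completed with status " << status << std::endl;
  return status;
}

The same idea carries over to the recursive branch: every SetOwner issued for a Find batch bumps a shared counter, and only the last completion (once Find reports no more results) fulfils the promise.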
@@ -0,0 +1,97 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_CHOWN
#define LIBHDFSPP_TOOLS_HDFS_CHOWN

#include <string>

#include <boost/program_options.hpp>

#include "hdfs-tool.h"
#include "internal/hdfs-ownership.h"

namespace hdfs::tools {
/**
 * {@class Chown} is an {@class HdfsTool} that changes the owner and/or group
 * of each file to the given owner and/or group.
 */
class Chown : public HdfsTool {
public:
  /**
   * {@inheritdoc}
   */
  Chown(int argc, char **argv);

  // Abiding to the Rule of 5
  Chown(const Chown &) = default;
  Chown(Chown &&) = default;
  Chown &operator=(const Chown &) = delete;
  Chown &operator=(Chown &&) = delete;
  ~Chown() override = default;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] std::string GetDescription() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Do() override;

protected:
  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool Initialize() override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool ValidateConstraints() const override;

  /**
   * {@inheritdoc}
   */
  [[nodiscard]] bool HandleHelp() const override;

  /**
   * Handle the file argument that's passed to this tool.
   *
   * @param ownership The owner's user and group names.
   * @param recursive Whether this operation needs to be performed recursively
   * on all the files in the given path's sub-directory.
   * @param file The path to the file whose ownership needs to be changed.
   *
   * @return A boolean indicating the result of this operation.
   */
  [[nodiscard]] virtual bool HandlePath(const Ownership &ownership,
                                        bool recursive,
                                        const std::string &file) const;

private:
  /**
   * A boost data-structure containing the description of positional arguments
   * passed to the command-line.
   */
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools

#endif
@@ -0,0 +1,52 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <exception>
#include <iostream>

#include <google/protobuf/stubs/common.h>

#include "hdfs-chown.h"

int main(int argc, char *argv[]) {
  const auto result = std::atexit([]() -> void {
    // Clean up static data on exit and prevent valgrind memory leaks
    google::protobuf::ShutdownProtobufLibrary();
  });
  if (result != 0) {
    std::cerr << "Error: Unable to schedule clean-up tasks for HDFS chown "
                 "tool, exiting"
              << std::endl;
    std::exit(EXIT_FAILURE);
  }

  hdfs::tools::Chown chown(argc, argv);
  auto success = false;

  try {
    success = chown.Do();
  } catch (const std::exception &e) {
    std::cerr << "Error: " << e.what() << std::endl;
  }

  if (!success) {
    std::exit(EXIT_FAILURE);
  }
  return 0;
}
@@ -1,185 +0,0 @@
/*
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements. See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership. The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing,
  software distributed under the License is distributed on an
  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  KIND, either express or implied. See the License for the
  specific language governing permissions and limitations
  under the License.
*/

#include <google/protobuf/stubs/common.h>
#include <unistd.h>
#include <future>
#include "tools_common.h"

void usage(){
  std::cout << "Usage: hdfs_chgrp [OPTION] GROUP FILE"
      << std::endl
      << std::endl << "Change the group association of each FILE to GROUP."
      << std::endl << "The user must be the owner of files. Additional information is in the Permissions Guide:"
      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
      << std::endl
      << std::endl << " -R operate on files and directories recursively"
      << std::endl << " -h display this help and exit"
      << std::endl
      << std::endl << "Examples:"
      << std::endl << "hdfs_chgrp -R new_group hdfs://localhost.localdomain:8020/dir/file"
      << std::endl << "hdfs_chgrp new_group /dir/file"
      << std::endl;
}

struct SetOwnerState {
  const std::string username;
  const std::string groupname;
  const std::function<void(const hdfs::Status &)> handler;
  //The request counter is incremented once every time SetOwner async call is made
  uint64_t request_counter;
  //This boolean will be set when find returns the last result
  bool find_is_done;
  //Final status to be returned
  hdfs::Status status;
  //Shared variables will need protection with a lock
  std::mutex lock;
  SetOwnerState(const std::string & username_, const std::string & groupname_,
                const std::function<void(const hdfs::Status &)> & handler_,
                uint64_t request_counter_, bool find_is_done_)
      : username(username_),
        groupname(groupname_),
        handler(handler_),
        request_counter(request_counter_),
        find_is_done(find_is_done_),
        status(),
        lock() {
  }
};

int main(int argc, char *argv[]) {
  //We should have 3 or 4 parameters
  if (argc != 3 && argc != 4) {
    usage();
    exit(EXIT_FAILURE);
  }

  bool recursive = false;
  int input;

  //Using GetOpt to read in the values
  opterr = 0;
  while ((input = getopt(argc, argv, "Rh")) != -1) {
    switch (input)
    {
    case 'R':
      recursive = 1;
      break;
    case 'h':
      usage();
      exit(EXIT_SUCCESS);
    case '?':
      if (isprint(optopt))
        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
      else
        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
      usage();
      exit(EXIT_FAILURE);
    default:
      exit(EXIT_FAILURE);
    }
  }
  std::string group = argv[optind];
  //Owner stays the same, just group association changes.
  std::string owner = "";
  std::string uri_path = argv[optind + 1];

  //Building a URI object from the given uri_path
  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);

  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    exit(EXIT_FAILURE);
  }

  /* wrap async FileSystem::SetOwner with promise to make it a blocking call */
  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
  std::future<hdfs::Status> future(promise->get_future());
  auto handler = [promise](const hdfs::Status &s) {
    promise->set_value(s);
  };

  if(!recursive){
    fs->SetOwner(uri.get_path(), owner, group, handler);
  }
  else {
    //Allocating shared state, which includes:
    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
    std::shared_ptr<SetOwnerState> state = std::make_shared<SetOwnerState>(owner, group, handler, 0, false);

    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and reques counter is 0.
    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {

      //For each result returned by Find we call async SetOwner with the handler below.
      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
        std::lock_guard<std::mutex> guard(state->lock);

        //Decrement the counter once since we are done with this async call
        if (!status_set_owner.ok() && state->status.ok()){
          //We make sure we set state->status only on the first error.
          state->status = status_set_owner;
        }
        state->request_counter--;
        if(state->request_counter == 0 && state->find_is_done){
          state->handler(state->status); //exit
        }
      };
      if(!stat_infos.empty() && state->status.ok()) {
        for (hdfs::StatInfo const& s : stat_infos) {
          //Launch an asynchronous call to SetOwner for every returned result
          state->request_counter++;
          fs->SetOwner(s.full_path, state->username, state->groupname, handlerSetOwner);
        }
      }

      //Lock this section because handlerSetOwner might be accessing the same
      //shared variables simultaneously
      std::lock_guard<std::mutex> guard(state->lock);
      if (!status_find.ok() && state->status.ok()){
        //We make sure we set state->status only on the first error.
        state->status = status_find;
      }
      if(!has_more_results){
        state->find_is_done = true;
        if(state->request_counter == 0){
          state->handler(state->status); //exit
        }
        return false;
      }
      return true;
    };

    //Asynchronous call to Find
    fs->Find(uri.get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
  }

  /* block until promise is set */
  hdfs::Status status = future.get();
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    exit(EXIT_FAILURE);
  }

  // Clean up static data and prevent valgrind memory leaks
  google::protobuf::ShutdownProtobufLibrary();
  return 0;
}
@@ -1,183 +0,0 @@
/*
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements. See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership. The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing,
  software distributed under the License is distributed on an
  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  KIND, either express or implied. See the License for the
  specific language governing permissions and limitations
  under the License.
*/

#include <google/protobuf/stubs/common.h>
#include <unistd.h>
#include <future>
#include "tools_common.h"

void usage(){
  std::cout << "Usage: hdfs_chmod [OPTION] <MODE[,MODE]... | OCTALMODE> FILE"
      << std::endl
      << std::endl << "Change the permissions of each FILE to MODE."
      << std::endl << "The user must be the owner of the file, or else a super-user."
      << std::endl << "Additional information is in the Permissions Guide:"
      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
      << std::endl
      << std::endl << " -R operate on files and directories recursively"
      << std::endl << " -h display this help and exit"
      << std::endl
      << std::endl << "Examples:"
      << std::endl << "hdfs_chmod -R 755 hdfs://localhost.localdomain:8020/dir/file"
      << std::endl << "hdfs_chmod 777 /dir/file"
      << std::endl;
}

struct SetPermissionState {
  const uint16_t permissions;
  const std::function<void(const hdfs::Status &)> handler;
  //The request counter is incremented once every time SetOwner async call is made
  uint64_t request_counter;
  //This boolean will be set when find returns the last result
  bool find_is_done;
  //Final status to be returned
  hdfs::Status status;
  //Shared variables will need protection with a lock
  std::mutex lock;
  SetPermissionState(const uint16_t permissions_, const std::function<void(const hdfs::Status &)> & handler_,
                     uint64_t request_counter_, bool find_is_done_)
      : permissions(permissions_),
        handler(handler_),
        request_counter(request_counter_),
        find_is_done(find_is_done_),
        status(),
        lock() {
  }
};

int main(int argc, char *argv[]) {
  //We should have 3 or 4 parameters
  if (argc != 3 && argc != 4) {
    usage();
    exit(EXIT_FAILURE);
  }

  bool recursive = false;
  int input;

  //Using GetOpt to read in the values
  opterr = 0;
  while ((input = getopt(argc, argv, "Rh")) != -1) {
    switch (input)
    {
    case 'R':
      recursive = 1;
      break;
    case 'h':
      usage();
      exit(EXIT_SUCCESS);
    case '?':
      if (isprint(optopt))
        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
      else
        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
      usage();
      exit(EXIT_FAILURE);
    default:
      exit(EXIT_FAILURE);
    }
  }
  std::string permissions = argv[optind];
  std::string uri_path = argv[optind + 1];

  //Building a URI object from the given uri_path
  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);

  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    exit(EXIT_FAILURE);
  }

  /* wrap async FileSystem::SetPermission with promise to make it a blocking call */
  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
  std::future<hdfs::Status> future(promise->get_future());
  auto handler = [promise](const hdfs::Status &s) {
    promise->set_value(s);
  };

  //strtol() is reading the value with base 8, NULL because we are reading in just one value.
  uint16_t perm = strtol(permissions.c_str(), NULL, 8);
  if(!recursive){
    fs->SetPermission(uri.get_path(), perm, handler);
  }
  else {
    //Allocating shared state, which includes:
    //permissions to be set, handler to be called, request counter, and a boolean to keep track if find is done
    std::shared_ptr<SetPermissionState> state = std::make_shared<SetPermissionState>(perm, handler, 0, false);

    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and reques counter is 0.
    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {

      //For each result returned by Find we call async SetPermission with the handler below.
      //SetPermission DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetPermission.
      auto handlerSetPermission = [state](const hdfs::Status &status_set_permission) {
        std::lock_guard<std::mutex> guard(state->lock);

        //Decrement the counter once since we are done with this async call
        if (!status_set_permission.ok() && state->status.ok()){
          //We make sure we set state->status only on the first error.
          state->status = status_set_permission;
        }
        state->request_counter--;
        if(state->request_counter == 0 && state->find_is_done){
          state->handler(state->status); //exit
        }
      };
      if(!stat_infos.empty() && state->status.ok()) {
        for (hdfs::StatInfo const& s : stat_infos) {
          //Launch an asynchronous call to SetPermission for every returned result
          state->request_counter++;
          fs->SetPermission(s.full_path, state->permissions, handlerSetPermission);
        }
      }

      //Lock this section because handlerSetPermission might be accessing the same
      //shared variables simultaneously
      std::lock_guard<std::mutex> guard(state->lock);
      if (!status_find.ok() && state->status.ok()){
        //We make sure we set state->status only on the first error.
        state->status = status_find;
      }
      if(!has_more_results){
        state->find_is_done = true;
        if(state->request_counter == 0){
          state->handler(state->status); //exit
        }
        return false;
      }
      return true;
    };

    //Asynchronous call to Find
    fs->Find(uri.get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
  }

  /* block until promise is set */
  hdfs::Status status = future.get();
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    exit(EXIT_FAILURE);
  }

  // Clean up static data and prevent valgrind memory leaks
  google::protobuf::ShutdownProtobufLibrary();
  return 0;
}
@@ -1,195 +0,0 @@
/*
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements. See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership. The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing,
  software distributed under the License is distributed on an
  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  KIND, either express or implied. See the License for the
  specific language governing permissions and limitations
  under the License.
*/

#include <google/protobuf/stubs/common.h>
#include <unistd.h>
#include <future>
#include "tools_common.h"

void usage(){
  std::cout << "Usage: hdfs_chown [OPTION] [OWNER][:[GROUP]] FILE"
      << std::endl
      << std::endl << "Change the owner and/or group of each FILE to OWNER and/or GROUP."
      << std::endl << "The user must be a super-user. Additional information is in the Permissions Guide:"
      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
      << std::endl
      << std::endl << " -R operate on files and directories recursively"
      << std::endl << " -h display this help and exit"
      << std::endl
      << std::endl << "Owner is unchanged if missing. Group is unchanged if missing."
      << std::endl << "OWNER and GROUP may be numeric as well as symbolic."
      << std::endl
      << std::endl << "Examples:"
      << std::endl << "hdfs_chown -R new_owner:new_group hdfs://localhost.localdomain:8020/dir/file"
      << std::endl << "hdfs_chown new_owner /dir/file"
      << std::endl;
}

struct SetOwnerState {
  const std::string username;
  const std::string groupname;
  const std::function<void(const hdfs::Status &)> handler;
  //The request counter is incremented once every time SetOwner async call is made
  uint64_t request_counter;
  //This boolean will be set when find returns the last result
  bool find_is_done;
  //Final status to be returned
  hdfs::Status status;
  //Shared variables will need protection with a lock
  std::mutex lock;
  SetOwnerState(const std::string & username_, const std::string & groupname_,
                const std::function<void(const hdfs::Status &)> & handler_,
                uint64_t request_counter_, bool find_is_done_)
      : username(username_),
        groupname(groupname_),
        handler(handler_),
        request_counter(request_counter_),
        find_is_done(find_is_done_),
        status(),
        lock() {
  }
};

int main(int argc, char *argv[]) {
  //We should have 3 or 4 parameters
  if (argc != 3 && argc != 4) {
    usage();
    exit(EXIT_FAILURE);
  }

  bool recursive = false;
  int input;

  //Using GetOpt to read in the values
  opterr = 0;
  while ((input = getopt(argc, argv, "Rh")) != -1) {
    switch (input)
    {
    case 'R':
      recursive = 1;
      break;
    case 'h':
      usage();
      exit(EXIT_SUCCESS);
    case '?':
      if (isprint(optopt))
        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
      else
        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
      usage();
      exit(EXIT_FAILURE);
    default:
      exit(EXIT_FAILURE);
    }
  }
  std::string owner_and_group = argv[optind];
  std::string uri_path = argv[optind + 1];

  std::string owner, group;
  size_t owner_end = owner_and_group.find(":");
  if(owner_end == std::string::npos) {
    owner = owner_and_group;
  } else {
    owner = owner_and_group.substr(0, owner_end);
    group = owner_and_group.substr(owner_end + 1);
  }

  //Building a URI object from the given uri_path
  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);

  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, true);
  if (!fs) {
    std::cerr << "Could not connect the file system. " << std::endl;
    exit(EXIT_FAILURE);
  }

  /* wrap async FileSystem::SetOwner with promise to make it a blocking call */
  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
  std::future<hdfs::Status> future(promise->get_future());
  auto handler = [promise](const hdfs::Status &s) {
    promise->set_value(s);
  };

  if(!recursive){
    fs->SetOwner(uri.get_path(), owner, group, handler);
  }
  else {
    //Allocating shared state, which includes:
    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
    std::shared_ptr<SetOwnerState> state = std::make_shared<SetOwnerState>(owner, group, handler, 0, false);

    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and reques counter is 0.
    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {

      //For each result returned by Find we call async SetOwner with the handler below.
      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
        std::lock_guard<std::mutex> guard(state->lock);

        //Decrement the counter once since we are done with this async call
        if (!status_set_owner.ok() && state->status.ok()){
          //We make sure we set state->status only on the first error.
          state->status = status_set_owner;
        }
        state->request_counter--;
        if(state->request_counter == 0 && state->find_is_done){
          state->handler(state->status); //exit
        }
      };
      if(!stat_infos.empty() && state->status.ok()) {
        for (hdfs::StatInfo const& s : stat_infos) {
          //Launch an asynchronous call to SetOwner for every returned result
          state->request_counter++;
          fs->SetOwner(s.full_path, state->username, state->groupname, handlerSetOwner);
        }
      }

      //Lock this section because handlerSetOwner might be accessing the same
      //shared variables simultaneously
      std::lock_guard<std::mutex> guard(state->lock);
      if (!status_find.ok() && state->status.ok()){
        //We make sure we set state->status only on the first error.
        state->status = status_find;
      }
      if(!has_more_results){
        state->find_is_done = true;
        if(state->request_counter == 0){
          state->handler(state->status); //exit
        }
        return false;
      }
      return true;
    };

    //Asynchronous call to Find
    fs->Find(uri.get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
  }

  /* block until promise is set */
  hdfs::Status status = future.get();
  if (!status.ok()) {
    std::cerr << "Error: " << status.ToString() << std::endl;
    exit(EXIT_FAILURE);
  }

  // Clean up static data and prevent valgrind memory leaks
  google::protobuf::ShutdownProtobufLibrary();
  return 0;
}
@@ -0,0 +1,19 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

add_library(hdfs_ownership_obj OBJECT hdfs-ownership.cc)
@@ -0,0 +1,44 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdfs-ownership.h"

namespace hdfs::tools {
Ownership::Ownership(const std::string &user_and_group) {
  const auto owner_end = user_and_group.find(':');
  if (owner_end == std::string::npos) {
    user_ = user_and_group;
    return;
  }

  user_ = user_and_group.substr(0, owner_end);
  group_ = user_and_group.substr(owner_end + 1);
}

bool Ownership::operator==(const Ownership &other) const {
  const auto same_user = user_ == other.user_;
  if (group_.has_value() && other.group_.has_value()) {
    return same_user && group_.value() == other.group_.value();
  }

  if (!group_.has_value() && !other.group_.has_value()) {
    return same_user;
  }
  return false;
}
} // namespace hdfs::tools
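For illustration only (not part of the commit), a small self-contained sketch of the user:group split that the Ownership constructor above performs; the split_user_group helper name is hypothetical:

#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Illustrative re-implementation of the split done by Ownership's constructor:
// everything before the first ':' is the user, the remainder (possibly empty)
// is the group; if there is no ':' at all, the group is left unset.
std::pair<std::string, std::optional<std::string>>
split_user_group(const std::string &user_and_group) {
  const auto pos = user_and_group.find(':');
  if (pos == std::string::npos) {
    return {user_and_group, std::nullopt};
  }
  return {user_and_group.substr(0, pos), user_and_group.substr(pos + 1)};
}

int main() {
  for (const std::string arg : {"alice", "alice:hadoop", "alice:"}) {
    const auto [user, group] = split_user_group(arg);
    std::cout << arg << " -> user=" << user
              << " group=" << group.value_or("<unset>") << std::endl;
  }
  return 0;
}

This matches the non-recursive SetOwner call in Chown::HandlePath, where a missing group is passed through as an empty string via GetGroup().value_or("").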
@@ -0,0 +1,88 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBHDFSPP_TOOLS_HDFS_OWNERSHIP
#define LIBHDFSPP_TOOLS_HDFS_OWNERSHIP

#include <functional>
#include <mutex>
#include <optional>
#include <utility>

#include "hdfspp/status.h"

namespace hdfs::tools {
/**
 * {@class Ownership} contains the user and group ownership information.
 */
struct Ownership {
  explicit Ownership(const std::string &user_and_group);

  [[nodiscard]] const std::string &GetUser() const { return user_; }

  [[nodiscard]] const std::optional<std::string> &GetGroup() const {
    return group_;
  }

  bool operator==(const Ownership &other) const;

private:
  std::string user_;
  std::optional<std::string> group_;
};

/**
 * {@class OwnerState} holds information needed for recursive traversal of some
 * of the HDFS APIs.
 */
struct OwnerState {
  OwnerState(std::string username, std::string group,
             std::function<void(const hdfs::Status &)> handler,
             const uint64_t request_counter, const bool find_is_done)
      : user{std::move(username)}, group{std::move(group)},
        handler{std::move(handler)}, request_counter{request_counter},
        find_is_done{find_is_done} {}

  const std::string user;
  const std::string group;
  const std::function<void(const hdfs::Status &)> handler;

  /**
   * The request counter is incremented once every time SetOwner async call is
   * made.
   */
  uint64_t request_counter;

  /**
   * This boolean will be set when find returns the last result.
   */
  bool find_is_done{false};

  /**
   * Final status to be returned.
   */
  hdfs::Status status{};

  /**
   * Shared variables will need protection with a lock.
   */
  std::mutex lock;
};
} // namespace hdfs::tools

#endif