MAPREDUCE-6374. Distributed Cache File visibility should check permission of full path. Contributed by Chang Li
parent e13b671aa5
commit 107da29ff9

@@ -437,6 +437,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6204. TestJobCounters should use new properties instead of
     JobConf.MAPRED_TASK_JAVA_OPTS. (Sam Liu via ozawa)
 
+    MAPREDUCE-6374. Distributed Cache File visibility should check permission
+    of full path (Chang Li via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

@@ -236,6 +236,7 @@ public class ClientDistributedCacheManager {
       Map<URI, FileStatus> statCache) throws IOException {
     FileSystem fs = FileSystem.get(uri, conf);
     Path current = new Path(uri.getPath());
+    current = fs.makeQualified(current);
     //the leaf level file should be readable by others
     if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) {
       return false;

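The one-line fix above qualifies the cache URI before the permission walk. A cache file is treated as publicly visible only when others can READ the leaf (the checkPermissionOfOther call shown in the hunk) and traverse every ancestor directory; without makeQualified, a relative URI such as thirdCachefile carries no ancestor chain, so the walk never reaches a restrictive parent directory. Below is a minimal sketch of that rule, not the actual ClientDistributedCacheManager source: isPubliclyVisible and otherHas are assumed helper names, and the real code additionally caches FileStatus lookups in statCache.

// Simplified sketch (assumed helper names, not the exact Hadoop source):
// the leaf must be world-readable and every ancestor world-traversable.
import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;

public class CacheVisibilitySketch {

  static boolean isPubliclyVisible(FileSystem fs, Path p) throws IOException {
    // Resolve relative paths against the working directory and default FS
    // first; this is what the makeQualified() call in the patch guarantees.
    Path current = fs.makeQualified(p);
    if (!otherHas(fs, current, FsAction.READ)) {
      return false;                       // leaf must be readable by others
    }
    for (Path dir = current.getParent(); dir != null; dir = dir.getParent()) {
      if (!otherHas(fs, dir, FsAction.EXECUTE)) {
        return false;                     // every ancestor must be traversable
      }
    }
    return true;
  }

  private static boolean otherHas(FileSystem fs, Path p, FsAction action)
      throws IOException {
    FileStatus status = fs.getFileStatus(p);
    return status.getPermission().getOtherAction().implies(action);
  }
}

The sketch deliberately walks all the way to the filesystem root; qualification is what turns a bare "thirdCachefile" into a rooted path whose parents can be inspected at all.
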
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;

@@ -47,9 +48,13 @@ public class TestClientDistributedCacheManager {
       new File(System.getProperty("test.build.data", "/tmp")).toURI()
       .toString().replace(' ', '+');
 
+  private static final String TEST_VISIBILITY_DIR =
+      new File(TEST_ROOT_DIR, "TestCacheVisibility").toURI()
+      .toString().replace(' ', '+');
   private FileSystem fs;
   private Path firstCacheFile;
   private Path secondCacheFile;
+  private Path thirdCacheFile;
   private Configuration conf;
 
   @Before

@@ -58,8 +63,10 @@ public class TestClientDistributedCacheManager {
     fs = FileSystem.get(conf);
     firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile");
     secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile");
+    thirdCacheFile = new Path(TEST_VISIBILITY_DIR,"thirdCachefile");
     createTempFile(firstCacheFile, conf);
     createTempFile(secondCacheFile, conf);
+    createTempFile(thirdCacheFile, conf);
   }
 
   @After

@@ -70,6 +77,9 @@ public class TestClientDistributedCacheManager {
     if (!fs.delete(secondCacheFile, false)) {
       LOG.warn("Failed to delete secondcachefile");
     }
+    if (!fs.delete(thirdCacheFile, false)) {
+      LOG.warn("Failed to delete thirdCachefile");
+    }
   }
 
   @Test

@@ -93,6 +103,24 @@ public class TestClientDistributedCacheManager {
     Assert.assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
   }
 
+  @Test
+  public void testDetermineCacheVisibilities() throws IOException {
+    Path workingdir = new Path(TEST_VISIBILITY_DIR);
+    fs.setWorkingDirectory(workingdir);
+    fs.setPermission(workingdir, new FsPermission((short)00777));
+    fs.setPermission(new Path(TEST_ROOT_DIR), new FsPermission((short)00700));
+    Job job = Job.getInstance(conf);
+    Path relativePath = new Path("thirdCachefile");
+    job.addCacheFile(relativePath.toUri());
+    Configuration jobConf = job.getConfiguration();
+
+    Map<URI, FileStatus> statCache = new HashMap<URI, FileStatus>();
+    ClientDistributedCacheManager.
+        determineCacheVisibilities(jobConf, statCache);
+    Assert.assertFalse(jobConf.
+        getBoolean(MRJobConfig.CACHE_FILE_VISIBILITIES,true));
+  }
+
   @SuppressWarnings("deprecation")
   void createTempFile(Path p, Configuration conf) throws IOException {
     SequenceFile.Writer writer = null;

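The new test only catches the bug because the cache file is registered through a relative URI: TEST_VISIBILITY_DIR itself is 0777, but its parent TEST_ROOT_DIR is set to 0700, so the file must be reported as non-public once the full path is considered. The following standalone illustration, using the local filesystem and illustrative paths (QualifyDemo is not part of the patch), shows why qualification is the step that exposes that ancestor.

// Illustrative only: makeQualified() turns a relative cache path into a
// rooted path whose ancestors can actually be permission-checked.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    fs.setWorkingDirectory(new Path("/tmp/TestCacheVisibility"));

    Path relative = new Path("thirdCachefile");
    // Unqualified, the parent is just "." and a restrictive ancestor
    // directory is never examined.
    System.out.println(relative.getParent());   // .

    // Qualified, the full ancestor chain is visible to the permission walk.
    Path qualified = fs.makeQualified(relative);
    System.out.println(qualified);              // file:/tmp/TestCacheVisibility/thirdCachefile
    System.out.println(qualified.getParent());  // file:/tmp/TestCacheVisibility
  }
}

With the fix, determineCacheVisibilities sees the 0700 ancestor and records the file as non-public, so the getBoolean(MRJobConfig.CACHE_FILE_VISIBILITIES, true) assertion observes false.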