Mirror of https://github.com/apache/druid.git

Commit 18ec081855 (parent 6f06d701d9): moved getHdfsStorageDir() to DataSegmentPusherUtil, extended test
DataSegmentPusherUtil.java

@@ -19,6 +19,8 @@
 package com.metamx.druid.loading;
 
+import org.joda.time.format.ISODateTimeFormat;
+
 import com.google.common.base.Joiner;
 import com.metamx.druid.client.DataSegment;
 
 /**
@@ -26,19 +28,37 @@ import com.metamx.druid.client.DataSegment;
  */
 public class DataSegmentPusherUtil
 {
   private static final Joiner JOINER = Joiner.on("/").skipNulls();
 
   public static String getStorageDir(DataSegment segment)
   {
     return JOINER.join(
         segment.getDataSource(),
         String.format(
             "%s_%s",
             segment.getInterval().getStart(),
             segment.getInterval().getEnd()
         ),
         segment.getVersion(),
         segment.getShardSpec().getPartitionNum()
     );
   }
+
+  /**
+   * Due to https://issues.apache.org/jira/browse/HDFS-13 ":" are not allowed in
+   * path names. So we format paths differently for HDFS.
+   */
+  public static String getHdfsStorageDir(DataSegment segment)
+  {
+    return JOINER.join(
+        segment.getDataSource(),
+        String.format(
+            "%s_%s",
+            segment.getInterval().getStart().toString(ISODateTimeFormat.basicDateTime()),
+            segment.getInterval().getEnd().toString(ISODateTimeFormat.basicDateTime())
+        ),
+        segment.getVersion().replaceAll(":", "_"),
+        segment.getShardSpec().getPartitionNum()
+    );
+  }
 }
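For context, a minimal sketch (not part of the commit) of why the HDFS variant exists: DateTime.toString() keeps ":" in the interval bounds and the version, which HDFS rejects in path names per HDFS-13, so getHdfsStorageDir() switches to basicDateTime() and underscores the version. The dataSource, interval, and version values below are hypothetical.

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.format.ISODateTimeFormat;

// Sketch only: contrasts the two directory layouts built by DataSegmentPusherUtil.
// All segment values here (dataSource "wikipedia", interval, version) are made up.
public class StorageDirSketch
{
  public static void main(String[] args)
  {
    Interval interval = new Interval(
        new DateTime("2011-10-01", DateTimeZone.UTC),
        new DateTime("2011-10-02", DateTimeZone.UTC)
    );
    String version = "2011-10-02T00:00:00.000Z";

    // Layout produced by getStorageDir(): DateTime.toString() keeps ":".
    System.out.println(String.format(
        "wikipedia/%s_%s/%s/0", interval.getStart(), interval.getEnd(), version
    ));
    // wikipedia/2011-10-01T00:00:00.000Z_2011-10-02T00:00:00.000Z/2011-10-02T00:00:00.000Z/0

    // Layout produced by getHdfsStorageDir(): basicDateTime() contains no ":",
    // and any ":" left in the version string is replaced with "_".
    System.out.println(String.format(
        "wikipedia/%s_%s/%s/0",
        interval.getStart().toString(ISODateTimeFormat.basicDateTime()),
        interval.getEnd().toString(ISODateTimeFormat.basicDateTime()),
        version.replaceAll(":", "_")
    ));
    // wikipedia/20111001T000000.000Z_20111002T000000.000Z/2011-10-02T00_00_00.000Z/0
  }
}
```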
HdfsDataSegmentPusher.java

@@ -42,7 +42,7 @@ public class HdfsDataSegmentPusher implements DataSegmentPusher
   @Override
   public DataSegment push(File inDir, DataSegment segment) throws IOException
   {
-    final String storageDir = HdfsDataSegmentPusherUtil.getStorageDir(segment);
+    final String storageDir = DataSegmentPusherUtil.getHdfsStorageDir(segment);
     Path outFile = new Path(String.format("%s/%s/index.zip", config.getStorageDirectory(), storageDir));
     FileSystem fs = outFile.getFileSystem(hadoopConfig);
 
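A hedged sketch (not from the commit) of the full path push() now builds; the "/druid/segments" base is a hypothetical stand-in for config.getStorageDirectory(), and the storageDir literal matches the value asserted in the test below.

```java
import org.apache.hadoop.fs.Path;

// Sketch only: composes the final segment file path the way push() does.
// "/druid/segments" is an assumed stand-in for config.getStorageDirectory().
public class PushPathSketch
{
  public static void main(String[] args)
  {
    String storageDir = "something/20111001T000000.000Z_20111002T000000.000Z/brand_new_version/0";
    Path outFile = new Path(String.format("%s/%s/index.zip", "/druid/segments", storageDir));
    System.out.println(outFile);
    // /druid/segments/something/20111001T000000.000Z_20111002T000000.000Z/brand_new_version/0/index.zip
  }
}
```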
HdfsDataSegmentPusherUtil.java (deleted)

@@ -1,50 +0,0 @@
-/*
- * Druid - a distributed column store.
- * Copyright (C) 2012 Metamarkets Group Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-package com.metamx.druid.loading;
-
-import org.joda.time.format.ISODateTimeFormat;
-
-import com.google.common.base.Joiner;
-import com.metamx.druid.client.DataSegment;
-
-/**
- */
-public class HdfsDataSegmentPusherUtil
-{
-  private static final Joiner JOINER = Joiner.on("/").skipNulls();
-
-  /**
-   * Due to https://issues.apache.org/jira/browse/HDFS-13 ":" are not allowed in
-   * path names. So we format paths differently for HDFS.
-   */
-  public static String getStorageDir(DataSegment segment)
-  {
-    return JOINER.join(
-        segment.getDataSource(),
-        String.format(
-            "%s_%s",
-            segment.getInterval().getStart().toString(ISODateTimeFormat.basicDateTime()),
-            segment.getInterval().getEnd().toString(ISODateTimeFormat.basicDateTime())
-        ),
-        segment.getVersion().replaceAll(":", "_"),
-        segment.getShardSpec().getPartitionNum()
-    );
-  }
-}
HdfsDataSegmentPusherUtilTest.java renamed to DataSegmentPusherUtilTest.java

@@ -13,9 +13,9 @@ import java.util.Arrays;
 /**
  * @author jan.rudert
  */
-public class HdfsDataSegmentPusherUtilTest {
+public class DataSegmentPusherUtilTest {
     @Test
-    public void testGetStorageDir() throws Exception {
+    public void shouldNotHaveColonsInHdfsStorageDir() throws Exception {
 
         Interval interval = new Interval("2011-10-01/2011-10-02");
         ImmutableMap<String, Object> loadSpec = ImmutableMap.<String, Object>of("something", "or_other");

@@ -23,7 +23,7 @@ public class HdfsDataSegmentPusherUtilTest {
         DataSegment segment = new DataSegment(
                 "something",
                 interval,
-                "1",
+                "brand:new:version",
                 loadSpec,
                 Arrays.asList("dim1", "dim2"),
                 Arrays.asList("met1", "met2"),

@@ -32,8 +32,8 @@ public class HdfsDataSegmentPusherUtilTest {
                 1
         );
 
-        String storageDir = HdfsDataSegmentPusherUtil.getStorageDir(segment);
-        Assert.assertEquals("something/20111001T000000.000Z_20111002T000000.000Z/1/0", storageDir);
+        String storageDir = DataSegmentPusherUtil.getHdfsStorageDir(segment);
+        Assert.assertEquals("something/20111001T000000.000Z_20111002T000000.000Z/brand_new_version/0", storageDir);
 
     }
 }
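To see where the assertion strings come from, a small sketch (not part of the commit) of the Joda-Time formatting involved:

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.ISODateTimeFormat;

// Sketch only: derives the pieces of the asserted path
// "something/20111001T000000.000Z_20111002T000000.000Z/brand_new_version/0".
public class BasicDateTimeSketch
{
  public static void main(String[] args)
  {
    DateTime start = new DateTime("2011-10-01T00:00:00.000Z", DateTimeZone.UTC);
    System.out.println(start);                                              // 2011-10-01T00:00:00.000Z
    System.out.println(start.toString(ISODateTimeFormat.basicDateTime()));  // 20111001T000000.000Z
    System.out.println("brand:new:version".replaceAll(":", "_"));           // brand_new_version
  }
}
```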