mirror of https://github.com/apache/druid.git
Merge pull request #183 from rjurney/test-harness
Scripts for firing up stuff and getting a stand-alone AWS node working
This commit is contained in:
commit
2403056a0e
|
#!/usr/bin/env bash
# env.sh — prepare a stock Ubuntu 12.04 host for Druid:
#   * Oracle Java 7 (via webupd8team PPA, license pre-accepted)
#   * Kafka 0.7.2 (with its bundled ZooKeeper), started in the background
#   * MySQL 5.5 with a pre-seeded root password (non-interactive install)
set -e

# Setup Oracle Java
sudo apt-get update
sudo add-apt-repository -y ppa:webupd8team/java
# Second update is required so apt sees the freshly added PPA.
sudo apt-get update

# Setup yes answer to license question
echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections
echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections
sudo apt-get -y -q install oracle-java7-installer

# Automated Kafka setup
# '-f' makes curl fail on an HTTP error instead of saving the error page
# as the tarball (which would only surface later as a tar failure).
curl -f http://static.druid.io/artifacts/kafka-0.7.2-incubating-bin.tar.gz -o /tmp/kafka-0.7.2-incubating-bin.tar.gz
tar -xvzf /tmp/kafka-0.7.2-incubating-bin.tar.gz
cd kafka-0.7.2-incubating-bin
cat config/zookeeper.properties
# BUGFIX: the original used '2>&1 > /dev/null', which points stderr at the
# terminal and only stdout at /dev/null; '> /dev/null 2>&1' silences both.
nohup bin/zookeeper-server-start.sh config/zookeeper.properties > /dev/null 2>&1 &
# in a new console
nohup bin/kafka-server-start.sh config/server.properties > /dev/null 2>&1 &

# Install dependencies - mysql must be built from source, as the 12.04 apt-get hangs
export DEBIAN_FRONTEND=noninteractive
sudo debconf-set-selections <<< 'mysql-server-5.5 mysql-server/root_password password diurd'
sudo debconf-set-selections <<< 'mysql-server-5.5 mysql-server/root_password_again password diurd'
sudo apt-get -q -y -V --force-yes --reinstall install mysql-server-5.5

echo "ALL DONE with druid environment setup! Hit CTRL-C to proceed."
exit 0
#!/usr/bin/env bash
# run.sh — boot a single-machine Druid stack: create the MySQL metadata
# database, unpack the services tarball, then start realtime, master,
# compute and broker nodes in the background, each logging to logs/*.log.
set -e

# Is localhost expected with multi-node?
# '|| true': the GRANT/CREATE fails harmlessly when the database already
# exists (re-runs), and the original discarded the output anyway.
mysql -u root -pdiurd -e "GRANT ALL ON druid.* TO 'druid'@'localhost' IDENTIFIED BY 'diurd'; CREATE database druid;" > /dev/null 2>&1 || true

# BUGFIX throughout this file: the original wrote '2>&1 > target', which
# redirects stderr to the terminal BEFORE stdout is pointed at the target;
# '> target 2>&1' sends both streams where they were clearly intended.
tar -xvzf druid-services-*-bin.tar.gz > /dev/null 2>&1
# NOTE(review): assumes exactly one druid-services-* directory was unpacked;
# with set -e a failed cd aborts instead of running nodes in the wrong dir.
cd druid-services-*

# '-p' keeps re-runs working instead of failing on an existing directory.
mkdir -p logs

# Now start a realtime node
nohup java -Xmx256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Ddruid.realtime.specFile=config/realtime/realtime.spec -classpath lib/druid-services-0.5.5-SNAPSHOT-selfcontained.jar:config/realtime com.metamx.druid.realtime.RealtimeMain > logs/realtime.log 2>&1 &

# And a master node
nohup java -Xmx256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -classpath lib/druid-services-0.5.5-SNAPSHOT-selfcontained.jar:config/master com.metamx.druid.http.MasterMain > logs/master.log 2>&1 &

# And a compute node
nohup java -Xmx256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -classpath lib/druid-services-0.5.5-SNAPSHOT-selfcontained.jar:config/compute com.metamx.druid.http.ComputeMain > logs/compute.log 2>&1 &

# And a broker node
nohup java -Xmx256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -classpath lib/druid-services-0.5.5-SNAPSHOT-selfcontained.jar:config/broker com.metamx.druid.http.BrokerMain > logs/broker.log 2>&1 &

echo "Hit CTRL-C to continue..."
exit 0
#!/usr/bin/env bash
# Boot a single small EC2 instance, prepare it with env.sh, upload the Druid
# services tarball, and start the Druid daemons on it via run.sh.
#
# Before running, you will need to download the EC2 tools from http://aws.amazon.com/developertools/351
# and then setup your EC2_HOME and PATH variables (or similar):
#
# # Setup environment for ec2-api-tools
# export EC2_HOME=/path/to/ec2-api-tools-1.6.7.4/
# export PATH=$PATH:$EC2_HOME/bin
# export AWS_ACCESS_KEY=
# export AWS_SECRET_KEY=

# Check for ec2 commands we require and die if they're missing
type ec2-create-keypair >/dev/null 2>&1 || { echo >&2 "I require ec2-create-keypair but it's not installed. Aborting."; exit 1; }
type ec2-create-group >/dev/null 2>&1 || { echo >&2 "I require ec2-create-group but it's not installed. Aborting."; exit 1; }
type ec2-authorize >/dev/null 2>&1 || { echo >&2 "I require ec2-authorize but it's not installed. Aborting."; exit 1; }
type ec2-run-instances >/dev/null 2>&1 || { echo >&2 "I require ec2-run-instances but it's not installed. Aborting."; exit 1; }
type ec2-describe-instances >/dev/null 2>&1 || { echo >&2 "I require ec2-describe-instances but it's not installed. Aborting."; exit 1; }

# Create a keypair for our servers
echo "Removing old keypair for druid..."
ec2-delete-keypair druid-keypair
echo "Creating new keypair for druid..."
ec2-create-keypair druid-keypair > druid-keypair
chmod 0600 druid-keypair
mv druid-keypair ~/.ssh/

# Create a security group for our servers
echo "Creating a new security group for druid..."
ec2-create-group druid-group -d "Druid Cluster"

# Create rules that allow necessary services in our group
echo "Creating new firewall rules for druid..."
# SSH from outside
ec2-authorize druid-group -P tcp -p 22
# Enable all traffic within group
ec2-authorize druid-group -P tcp -p 1-65535 -o druid-group
ec2-authorize druid-group -P udp -p 1-65535 -o druid-group

echo "Booting a single small instance for druid..."
# Use ami ami-e7582d8e - Alestic Ubuntu 12.04 us-east
INSTANCE_ID=$(ec2-run-instances ami-e7582d8e -n 1 -g druid-group -k druid-keypair --instance-type m1.small | awk '/INSTANCE/{print $2}')

# Poll once a second until the instance reports "running".
while true; do
  sleep 1
  # BUGFIX: expansions are now quoted; the original unquoted
  # [ $INSTANCE_STATUS == "running" ] is a '[' syntax error whenever the
  # status field comes back empty, and '==' is a bashism inside '['.
  INSTANCE_STATUS=$(ec2-describe-instances | grep INSTANCE | grep "$INSTANCE_ID" | cut -f6)
  if [ "$INSTANCE_STATUS" = "running" ]; then
    echo "Instance $INSTANCE_ID is status $INSTANCE_STATUS..."
    break
  fi
done

# Wait for the instance to come up
echo "Waiting 60 seconds for instance $INSTANCE_ID to boot..."
sleep 60

# Get hostname and ssh with the key we created, and ssh there
INSTANCE_ADDRESS=$(ec2-describe-instances | grep 'INSTANCE' | grep "$INSTANCE_ID" | cut -f4)
echo "Connecting to $INSTANCE_ADDRESS to prepare environment for druid..."
scp -i ~/.ssh/druid-keypair -o StrictHostKeyChecking=no ./ec2/env.sh "ubuntu@${INSTANCE_ADDRESS}:"
ssh -q -i ~/.ssh/druid-keypair -o StrictHostKeyChecking=no "ubuntu@${INSTANCE_ADDRESS}" 'chmod +x ./env.sh;./env.sh'

echo "Prepared $INSTANCE_ADDRESS for druid."

# Now to scp a tarball up that can run druid!
# NOTE(review): '[ -f glob ]' only works while the glob matches exactly one
# file — multiple builds in services/target would break this test.
if [ -f ../../services/target/druid-services-*-SNAPSHOT-bin.tar.gz ]; then
  echo "Uploading druid tarball to server..."
  scp -i ~/.ssh/druid-keypair -o StrictHostKeyChecking=no ../../services/target/druid-services-*-bin.tar.gz "ubuntu@${INSTANCE_ADDRESS}:"
else
  # BUGFIX: the original printed this error and then fell through and ran
  # run.sh anyway; abort instead, since run.sh cannot work without the tarball.
  echo "ERROR - package not built!" >&2
  exit 1
fi

# Now boot druid parts
scp -i ~/.ssh/druid-keypair -o StrictHostKeyChecking=no ./ec2/run.sh "ubuntu@${INSTANCE_ADDRESS}:"
ssh -q -i ~/.ssh/druid-keypair -o StrictHostKeyChecking=no "ubuntu@${INSTANCE_ADDRESS}" 'chmod +x ./run.sh;./run.sh'

echo "Druid booting complete!"
echo "ssh -i ~/.ssh/druid-keypair ubuntu@${INSTANCE_ADDRESS} #to connect"
# Druid node configuration for the single-machine test harness (port 8083).
# NOTE(review): duplicate keys removed — java.util.Properties keeps the LAST
# occurrence, so the effective values below match the original file exactly:
#   - com.metamx.emitter.logging appeared twice with the same value (true)
#   - druid.host was first 127.0.0.1, then overridden to 127.0.0.1:8083
druid.host=127.0.0.1:8083
druid.port=8083

# Emit metrics/alerts as log lines.
com.metamx.emitter.logging=true
com.metamx.emitter.logging.level=info

# Query processing pool: thread name format, thread count, per-thread buffer (bytes).
druid.processing.formatString=processing_%s
druid.processing.numThreads=1
druid.processing.buffer.sizeBytes=10000000

#emitting, opaque marker
druid.service=example

druid.request.logging.dir=/tmp/example/log
druid.realtime.specFile=realtime.spec

# below are dummy values when operating a realtime only node
com.metamx.aws.accessKey=dummy_access_key
com.metamx.aws.secretKey=dummy_secret_key
druid.pusher.s3.bucket=dummy_s3_bucket

# ZooKeeper and MySQL metadata storage — credentials match run.sh's GRANT/CREATE.
druid.zk.service.host=localhost
druid.server.maxSize=300000000000
druid.zk.paths.base=/druid
druid.database.segmentTable=prod_segments
druid.database.user=druid
druid.database.password=diurd
druid.database.connectURI=jdbc:mysql://localhost:3306/druid
druid.zk.paths.discoveryPath=/druid/discoveryPath
druid.database.ruleTable=rules
druid.database.configTable=config

# Path on local FS for storage of segments; dir will be created if needed
druid.paths.indexCache=/tmp/druid/indexCache
# Path on local FS for storage of segment metadata; dir will be created if needed
druid.paths.segmentInfoCache=/tmp/druid/segmentInfoCache
druid.pusher.local.storageDirectory=/tmp/druid/localStorage
druid.pusher.local=true

# thread pool size for servicing queries
druid.client.http.connections=30
# Druid node configuration for the single-machine test harness (port 8082).
# NOTE(review): the duplicate com.metamx.emitter.logging=true line was removed;
# java.util.Properties keeps the last occurrence, so behavior is unchanged.
druid.host=127.0.0.1
druid.port=8082

# Emit metrics/alerts as log lines.
com.metamx.emitter.logging=true
com.metamx.emitter.logging.level=info

# Query processing pool: thread name format, thread count, per-thread buffer (bytes).
druid.processing.formatString=processing_%s
druid.processing.numThreads=1
druid.processing.buffer.sizeBytes=10000000

#emitting, opaque marker
druid.service=example

druid.request.logging.dir=/tmp/example/log
druid.realtime.specFile=realtime.spec

# below are dummy values when operating a realtime only node
com.metamx.aws.accessKey=dummy_access_key
com.metamx.aws.secretKey=dummy_secret_key
druid.pusher.s3.bucket=dummy_s3_bucket

# ZooKeeper and MySQL metadata storage — credentials match run.sh's GRANT/CREATE.
druid.zk.service.host=localhost
druid.server.maxSize=300000000000
druid.zk.paths.base=/druid
druid.database.segmentTable=prod_segments
druid.database.user=druid
druid.database.password=diurd
druid.database.connectURI=jdbc:mysql://localhost:3306/druid
druid.zk.paths.discoveryPath=/druid/discoveryPath
druid.database.ruleTable=rules
druid.database.configTable=config

# Path on local FS for storage of segments; dir will be created if needed
druid.paths.indexCache=/tmp/druid/indexCache
# Path on local FS for storage of segment metadata; dir will be created if needed
druid.paths.segmentInfoCache=/tmp/druid/segmentInfoCache
druid.pusher.local.storageDirectory=/tmp/druid/localStorage
druid.pusher.local=true
# Druid node configuration for the single-machine test harness (port 8081).
# NOTE(review): the duplicate com.metamx.emitter.logging=true line was removed;
# java.util.Properties keeps the last occurrence, so behavior is unchanged.
druid.host=127.0.0.1
druid.port=8081

# Emit metrics/alerts as log lines.
com.metamx.emitter.logging=true
com.metamx.emitter.logging.level=info

# Query processing pool: thread name format, thread count, per-thread buffer (bytes).
druid.processing.formatString=processing_%s
druid.processing.numThreads=1
druid.processing.buffer.sizeBytes=10000000

#emitting, opaque marker
druid.service=example

druid.request.logging.dir=/tmp/example/log
druid.realtime.specFile=realtime.spec

# below are dummy values when operating a realtime only node
com.metamx.aws.accessKey=dummy_access_key
com.metamx.aws.secretKey=dummy_secret_key
druid.pusher.s3.bucket=dummy_s3_bucket

# ZooKeeper and MySQL metadata storage — credentials match run.sh's GRANT/CREATE.
druid.zk.service.host=localhost
druid.server.maxSize=300000000000
druid.zk.paths.base=/druid
druid.database.segmentTable=prod_segments
druid.database.user=druid
druid.database.password=diurd
druid.database.connectURI=jdbc:mysql://localhost:3306/druid
druid.zk.paths.discoveryPath=/druid/discoveryPath
druid.database.ruleTable=rules
druid.database.configTable=config

# Path on local FS for storage of segments; dir will be created if needed
druid.paths.indexCache=/tmp/druid/indexCache
# Path on local FS for storage of segment metadata; dir will be created if needed
druid.paths.segmentInfoCache=/tmp/druid/segmentInfoCache
druid.pusher.local.storageDirectory=/tmp/druid/localStorage
druid.pusher.local=true
[{
  "schema": {
    "dataSource": "druidtest",
    "aggregators": [
      {"type": "count", "name": "impressions"},
      {"type": "doubleSum", "name": "wp", "fieldName": "wp"}
    ],
    "indexGranularity": "minute",
    "shardSpec": {"type": "none"}
  },
  "config": {
    "maxRowsInMemory": 500000,
    "intermediatePersistPeriod": "PT10m"
  },
  "firehose": {
    "type": "kafka-0.7.2",
    "consumerProps": {
      "zk.connect": "localhost:2181",
      "zk.connectiontimeout.ms": "15000",
      "zk.sessiontimeout.ms": "15000",
      "zk.synctime.ms": "5000",
      "groupid": "topic-pixel-local",
      "fetch.size": "1048586",
      "autooffset.reset": "largest",
      "autocommit.enable": "false"
    },
    "feed": "druidtest",
    "parser": {
      "timestampSpec": {"column": "utcdt", "format": "iso"},
      "data": {"format": "json"},
      "dimensionExclusions": ["wp"]
    }
  },
  "plumber": {
    "type": "realtime",
    "windowPeriod": "PT10m",
    "segmentGranularity": "hour",
    "basePersistDirectory": "/tmp/realtime/basePersist",
    "rejectionPolicy": {"type": "messageTime"}
  }
}]
# Druid node configuration for the single-machine test harness (port 8080).
# NOTE(review): duplicate keys removed — java.util.Properties keeps the LAST
# occurrence, so the effective values below match the original file exactly:
#   - com.metamx.emitter.logging appeared twice with the same value (true)
#   - druid.host was first 127.0.0.1, then overridden to 127.0.0.1:8080
druid.host=127.0.0.1:8080
druid.port=8080

# Emit metrics/alerts as log lines.
com.metamx.emitter.logging=true
com.metamx.emitter.logging.level=info

# Query processing pool: thread name format, thread count, per-thread buffer (bytes).
druid.processing.formatString=processing_%s
druid.processing.numThreads=1
druid.processing.buffer.sizeBytes=10000000

#emitting, opaque marker
druid.service=example

druid.request.logging.dir=/tmp/example/log
druid.realtime.specFile=realtime.spec

# below are dummy values when operating a realtime only node
com.metamx.aws.accessKey=dummy_access_key
com.metamx.aws.secretKey=dummy_secret_key
druid.pusher.s3.bucket=dummy_s3_bucket

# ZooKeeper and MySQL metadata storage — credentials match run.sh's GRANT/CREATE.
druid.zk.service.host=localhost
druid.server.maxSize=300000000000
druid.zk.paths.base=/druid
druid.database.segmentTable=prod_segments
druid.database.user=druid
druid.database.password=diurd
druid.database.connectURI=jdbc:mysql://localhost:3306/druid
druid.zk.paths.discoveryPath=/druid/discoveryPath
druid.database.ruleTable=rules
druid.database.configTable=config

# Path on local FS for storage of segments; dir will be created if needed
druid.paths.indexCache=/tmp/druid/indexCache
# Path on local FS for storage of segment metadata; dir will be created if needed
druid.paths.segmentInfoCache=/tmp/druid/segmentInfoCache
druid.pusher.local.storageDirectory=/tmp/druid/localStorage
druid.pusher.local=true
@ -1,10 +1,9 @@
|
||||||
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
|
<?xml version="1.0"?>
|
||||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
|
||||||
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
|
|
||||||
<id>bin</id>
|
<id>bin</id>
|
||||||
<formats>
|
<formats>
|
||||||
<format>tar.gz</format>
|
<format>tar.gz</format>
|
||||||
</formats>
|
</formats>
|
||||||
<fileSets>
|
<fileSets>
|
||||||
<fileSet>
|
<fileSet>
|
||||||
<directory>../examples/config</directory>
|
<directory>../examples/config</directory>
|
||||||
|
@ -13,6 +12,34 @@
|
||||||
</includes>
|
</includes>
|
||||||
<outputDirectory>config</outputDirectory>
|
<outputDirectory>config</outputDirectory>
|
||||||
</fileSet>
|
</fileSet>
|
||||||
|
<fileSet>
|
||||||
|
<directory>../examples/config/broker</directory>
|
||||||
|
<includes>
|
||||||
|
<include>*</include>
|
||||||
|
</includes>
|
||||||
|
<outputDirectory>config/broker</outputDirectory>
|
||||||
|
</fileSet>
|
||||||
|
<fileSet>
|
||||||
|
<directory>../examples/config/master</directory>
|
||||||
|
<includes>
|
||||||
|
<include>*</include>
|
||||||
|
</includes>
|
||||||
|
<outputDirectory>config/master</outputDirectory>
|
||||||
|
</fileSet>
|
||||||
|
<fileSet>
|
||||||
|
<directory>../examples/config/realtime</directory>
|
||||||
|
<includes>
|
||||||
|
<include>*</include>
|
||||||
|
</includes>
|
||||||
|
<outputDirectory>config/realtime</outputDirectory>
|
||||||
|
</fileSet>
|
||||||
|
<fileSet>
|
||||||
|
<directory>../examples/config/compute</directory>
|
||||||
|
<includes>
|
||||||
|
<include>*</include>
|
||||||
|
</includes>
|
||||||
|
<outputDirectory>config/compute</outputDirectory>
|
||||||
|
</fileSet>
|
||||||
<fileSet>
|
<fileSet>
|
||||||
<directory>../examples/bin</directory>
|
<directory>../examples/bin</directory>
|
||||||
<includes>
|
<includes>
|
||||||
|
@ -28,14 +55,21 @@
|
||||||
</includes>
|
</includes>
|
||||||
<outputDirectory>lib</outputDirectory>
|
<outputDirectory>lib</outputDirectory>
|
||||||
</fileSet>
|
</fileSet>
|
||||||
<fileSet>
|
<fileSet>
|
||||||
|
<directory>../services/target</directory>
|
||||||
|
<includes>
|
||||||
|
<include>druid-services-*-selfcontained.jar</include>
|
||||||
|
</includes>
|
||||||
|
<outputDirectory>lib</outputDirectory>
|
||||||
|
</fileSet>
|
||||||
|
<fileSet>
|
||||||
<directory>../examples/bin/examples</directory>
|
<directory>../examples/bin/examples</directory>
|
||||||
<includes>
|
<includes>
|
||||||
<include>**</include>
|
<include>**</include>
|
||||||
</includes>
|
</includes>
|
||||||
<outputDirectory>examples</outputDirectory>
|
<outputDirectory>examples</outputDirectory>
|
||||||
</fileSet>
|
</fileSet>
|
||||||
<fileSet>
|
<fileSet>
|
||||||
<directory>../examples/bin/examples/twitter</directory>
|
<directory>../examples/bin/examples/twitter</directory>
|
||||||
<includes>
|
<includes>
|
||||||
<include>*sh</include>
|
<include>*sh</include>
|
||||||
|
|
Loading…
Reference in New Issue