summaryrefslogtreecommitdiff
path: root/qpid/java/perftests/bin
diff options
context:
space:
mode:
Diffstat (limited to 'qpid/java/perftests/bin')
-rwxr-xr-xqpid/java/perftests/bin/monitoring/monitor-broker.sh221
-rwxr-xr-xqpid/java/perftests/bin/monitoring/runTests.sh148
-rwxr-xr-xqpid/java/perftests/bin/monitoring/stop-monitored-broker.sh124
-rwxr-xr-xqpid/java/perftests/bin/processing/process.sh388
-rwxr-xr-xqpid/java/perftests/bin/processing/processAll.sh58
-rwxr-xr-xqpid/java/perftests/bin/processing/processTests.py850
-rwxr-xr-xqpid/java/perftests/bin/run_many.sh30
-rwxr-xr-xqpid/java/perftests/bin/topicListener.sh32
-rwxr-xr-xqpid/java/perftests/bin/topicPublisher.sh31
9 files changed, 1882 insertions, 0 deletions
diff --git a/qpid/java/perftests/bin/monitoring/monitor-broker.sh b/qpid/java/perftests/bin/monitoring/monitor-broker.sh
new file mode 100755
index 0000000000..27c8ff25f6
--- /dev/null
+++ b/qpid/java/perftests/bin/monitoring/monitor-broker.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+#
+# This script starts a broker and then starts additional logging as required.
+# *.pid files are generated in the LOG_DIR for later use by the stop-monitored-broker
+# script.
+#
+# Currently this process starts:
+# - The broker with additional QPID_OPTS for gc logging
+# - Top to monitor the CPU usage
+#
+# Additional processes can be started and as long as they write a PID into LOG_DIR/*.pid
+# it will be shut down with the stop script
+#
+
+#
+# Output the broker log file to aid problem diagnosis
+# then exit.
+#
+brokerFailExit()
+{
+ echo "Broker failed to start up."
+ cat $BROKER_LOG
+ exit 1
+}
+
+showUsageExit()
+{
+ echo "Usage $0 <Path to Test Broker> <LOG DIR> <CPU Monitor Rate (s)> [Additional options to
+ pass to Qpid broker startup]"
+ exit 1
+}
+
+#
+# Perform 3 attempts to get the broker PID via ps and grep
+# if unable then output the broker log and exit
+#
+getBrokerPID()
+{
+ attempts=3
+ ready=0
+ while [ $ready == 0 ] ; do
+
+ PID=`ps auxwww| grep java | grep Xloggc | awk '{print $2}'`
+
+ if [ ! $PID == 0 ] ; then
+ ready=1
+ else
+ attempts=$[ $attempts - 1 ]
+
+ if [ $attempts == 0 ] ; then
+ brokerFailExit
+ fi
+
+ sleep 1
+ fi
+ done
+
+}
+
+
+#
+# Additional Check to ensure that the broker process
+# has correctly written 'Ready' to the log file.
+#
+checkBrokerStarted()
+{
+ attempts=3
+ ready=0
+ while [ $ready == 0 ] ; do
+
+ grep Ready $BROKER_LOG > /dev/null
+
+ if [ $? == 0 ] ; then
+ ready=1
+ else
+ attempts=$[ $attempts - 1 ]
+
+ if [ $attempts == 0 ] ; then
+ brokerFailExit
+ fi
+
+ echo "Broker not ready sleeping 1s"
+ sleep 1
+ fi
+ done
+}
+
+#
+# Command Line setup
+#
+
+# Ensure we have minimum of three arguments
+if [[ $# > 2 ]] ; then
+ BROKER_VERSION=$1
+ LOG_DIR=$2
+ CPU_MONITOR_RATE=$3
+ # Remove these arguments from the $@ variable
+ shift
+ shift
+ shift
+else
+ # If we have no arguments then use these as the default
+ CPU_MONITOR_RATE=0.5
+ LOG_DIR=$QPID_WORK/logging
+ BROKER_VERSION=qpid-0.5
+fi
+
+
+#
+# Check the specified broker is reachable
+# if it is not then log and show usage
+#
+if [ ! -d $BROKER_VERSION ] ; then
+ echo "Broker not available at: $BROKER_VERSION"
+ showUsageExit
+fi
+
+#
+# Check to see if we have an absolute path for logging
+#
+logStart=`echo $LOG_DIR|cut -c 1`
+
+
+#
+# If we don't have an absolute path then add the current
+# directory path to the start.
+#
+if [[ $logStart != '/' ]] ; then
+ echo -n "$LOG_DIR is not absolute, using "
+ LOG_DIR=`pwd`/$LOG_DIR
+ echo $LOG_DIR
+fi
+
+#
+# Validate that the directory does not exist
+# - this is so we can guarantee a clean run.
+# If it does exist then log and show usage
+#
+if [ -d $LOG_DIR ] ; then
+ echo "Log directory already exists : $LOG_DIR"
+ showUsageExit
+fi
+
+#
+# Create the logging directory
+#
+mkdir -p $LOG_DIR
+
+#
+# Variable for broker log
+#
+BROKER_LOG=$LOG_DIR/broker.log
+
+# Variable to hold broker PID
+PID=0
+
+export QPID_OPTS="-Xloggc:$LOG_DIR/gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps"
+
+#
+# Start Qpid Broker
+#
+echo "Starting Broker : $BROKER_VERSION"
+pushd $BROKER_VERSION/bin > /dev/null
+./qpid-server $@ 2> $BROKER_LOG >&2 &
+popd > /dev/null
+
+# Wait and check startup was ok
+echo "Waiting for broker startup"
+getBrokerPID
+
+checkBrokerStarted
+
+echo $PID > $LOG_DIR/broker.pid
+
+#
+# Start CPU Monitoring via TOP
+#
+echo "Starting CPU Monitor at RATE:$CPU_MONITOR_RATE on $SERVER1"
+pushd $LOG_DIR > /dev/null
+
+echo $CPU_MONITOR_RATE > top.rate
+
+top -d $CPU_MONITOR_RATE -S -c -p $PID -b > broker_cpu.log &
+
+#
+# Get top pid using $!
+#
+echo $! > $LOG_DIR/top.pid
+
+popd > /dev/null
+
+
+#
+# Generate Stat files
+#
+echo "Generating Stat data"
+stat $BROKER_LOG > $BROKER_LOG.stat
+stat $LOG_DIR/broker_cpu.log > $LOG_DIR/broker_cpu.log.stat
+stat $LOG_DIR/gc.log > $LOG_DIR/gc.log.stat
+
diff --git a/qpid/java/perftests/bin/monitoring/runTests.sh b/qpid/java/perftests/bin/monitoring/runTests.sh
new file mode 100755
index 0000000000..676db0d4fa
--- /dev/null
+++ b/qpid/java/perftests/bin/monitoring/runTests.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+#
+# Run specified performance tests and
+# gather details about the test run
+#
+
+
+runTest()
+{
+ echo "$@"
+ echo "$@ --csv -o $LOG_DIR" >> $LOG_DIR/TestRun.log 2>&1
+ ./$@ --csv -o $LOG_DIR >> $LOG_DIR/TestRun.log 2>&1
+}
+
+showUsageExit()
+{
+ echo "Usage $0 <Path to Test Pack> <LOG DIR> <TEST LIST FILE>"
+ exit 1
+}
+
+# Ensure we have minimum of three arguments
+if [[ $# > 2 ]] ; then
+ TEST_VERSION=$1
+ LOG_DIR=$2
+ TEST_LIST=$3
+ # Remove these arguments from the $@ variable
+ shift
+ shift
+ shift
+else
+ showUsageExit
+fi
+
+#
+# Check the specified broker is reachable
+# if it is not then log and show usage
+#
+if [ ! -d $TEST_VERSION ] ; then
+ echo "Tests not available at: $TEST_VERSION"
+ showUsageExit
+fi
+
+
+#
+# Check to see if we have an absolute path for logging
+#
+logStart=`echo $LOG_DIR|cut -c 1`
+
+#
+# If we don't have an absolute path then add the current
+# directory path to the start.
+#
+if [[ $logStart != '/' ]] ; then
+ echo -n "$LOG_DIR is not absolute, using "
+ LOG_DIR=`pwd`/$LOG_DIR
+ echo $LOG_DIR
+fi
+
+#
+# Validate that the directory does not exist
+# - this is so we can guarantee a clean run.
+# If it does exist then log and show usage
+#
+if [ -d $LOG_DIR ] ; then
+ echo "Log directory already exists : $LOG_DIR"
+ showUsageExit
+fi
+
+#
+# Check to see if we have an absolute path for test list
+#
+testListStart=`echo $TEST_LIST|cut -c 1`
+
+#
+# If we don't have an absolute path then add the current
+# directory path to the start.
+#
+if [[ $testListStart != '/' ]] ; then
+ echo -n "$TEST_LIST is not absolute, using "
+ TEST_LIST=`pwd`/$TEST_LIST
+ echo $TEST_LIST
+fi
+
+#
+# Validate that the test list file exists and is
+# readable so we can guarantee a clean run.
+# If it is not readable then log and show usage
+#
+# -r Check file exists and is readable
+if [ ! -r $TEST_LIST ] ; then
+ echo "Test file is not readable : $TEST_LIST"
+ showUsageExit
+fi
+
+
+
+#
+# Create the logging directory
+#
+mkdir -p $LOG_DIR
+
+
+
+echo "Starting Test Run in : $TEST_VERSION"
+pushd $TEST_VERSION/bin > /dev/null
+
+#
+# Run tests
+#
+
+
+while read testCommand
+do
+ runTest $testCommand
+done < "$TEST_LIST"
+
+
+popd > /dev/null
+
+
+#
+# Generate Stat files
+#
+echo "Generating Stat data"
+for file in `find $LOG_DIR -name "*.csv"` ; do
+ stat $file > $file.stat
+done
diff --git a/qpid/java/perftests/bin/monitoring/stop-monitored-broker.sh b/qpid/java/perftests/bin/monitoring/stop-monitored-broker.sh
new file mode 100755
index 0000000000..ad882b0bea
--- /dev/null
+++ b/qpid/java/perftests/bin/monitoring/stop-monitored-broker.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+#
+# Script to stop the running of a monitored broker
+# and the associated monitoring processes.
+#
+# Looks in the specified directory for pid files and
+# stops those processes
+#
+
+
+usage()
+{
+ echo "Usage: $0 <LOG_DIR>"
+}
+
+#
+# Attempt to gracefully kill processes
+#
+stopRun()
+{
+ kill $PIDS
+}
+
+
+#
+# Forcibly stop processes
+#
+forceStopRun()
+{
+ kill -9 $PIDS
+}
+
+#
+# Show usage if we are not started correctly
+#
+if [ $# != 1 ] ; then
+ usage
+ exit 1
+fi
+
+LOG_DIR=$1
+
+
+PIDS=`cat $LOG_DIR/*.pid`
+
+echo "Preparing to stop:"
+#
+# Escape the path so we can use sed to remove it.
+#
+path=`echo $LOG_DIR|sed -e s/\\\//\\\\\\\\\\\//g`
+
+for i in `ls $LOG_DIR/*.pid` ; do
+ # Remove path from pid item then remove any final '/'
+ echo $i|cut -d '.' -f 1| sed -e s/$path// |tr '/' ' '
+done
+
+status=`ps $PIDS |wc -l`
+
+if [ $status == 1 ] ; then
+ echo "Processes do not appear to be running."
+ echo "Have they already been stopped?"
+ exit 0
+fi
+
+attempts=3
+
+while [ ! $status == 1 ] ; do
+ stopRun
+ sleep 1
+ status=`ps $PIDS |wc -l`
+
+ if [ $status == 1 ] ; then
+ echo "Done"
+ exit 0
+ else
+ attempts=$[ $attempts - 1 ]
+
+ if [ $attempts == 0 ] ; then
+ break
+ fi
+
+ echo "Sleeping as processes not stopped"
+ sleep 2
+
+ fi
+done
+
+# If we haven't been able to kill the processes then
+# forcibly do it
+if [ ! $status == 1 ] ; then
+ forceStopRun
+ sleep 1
+ status=`ps $PIDS |wc -l`
+
+ if [ $status == 1 ] ; then
+ echo "Done"
+ else
+ echo "Stop failed"
+ exit 1
+ fi
+else
+ echo "Done"
+fi
diff --git a/qpid/java/perftests/bin/processing/process.sh b/qpid/java/perftests/bin/processing/process.sh
new file mode 100755
index 0000000000..6dbf9b9ea8
--- /dev/null
+++ b/qpid/java/perftests/bin/processing/process.sh
@@ -0,0 +1,388 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+usage()
+{
+ echo "usage: process.sh <Qpid Version> <Test Type Queue/Topic/Latency..> <Volume of data %age> <broker details>"
+ echo "These parameters are used to title and name the graphs:"
+ echo 'Title = $<Qpid Version> $<Test Type> : $<Volume>% volume'
+ echo 'File = $<Qpid Version>-$<Broker Details>-$<Test Type>-$<Volume>'
+ exit 1
+}
+
+processCMSGCFile()
+{
+ # Extract all the ParNew timings: Ignore any lines corrupted by concurrent CMS logging.
+ grep -v CMS gc.log|grep ParNew > $work/parnew.gc.log
+
+ # Calculate Full GC Data
+ grep failure -B 1 gc.log | sed -e :a -e '$!N;s/\n.(concurrent/ /;ta' -e 'P;D' |grep -v ^\- > $work/full.gc.log
+ cut -d ':' -f 1 $work/full.gc.log > $work/full.time.log
+ sed -e 's/failure)/\#/g' $work/full.gc.log |cut -d '#' -f 2- |awk '{print $3}' > $work/full.dur.log
+
+ # Move to the working directory to further process the gathered data.
+ pushd $work &> /dev/null
+
+ # Extract the Heap and timing data.
+ #8 is the full gc data
+ #5 is the paranew gc data
+ cat parnew.gc.log | awk '{print $8}' | cut -d '(' -f 2 | cut -d 'K' -f 1 > HEAP_MAX.log
+ cat parnew.gc.log | awk '{print $8}' | cut -d 'K' -f 1 > HEAP_PRE_GC.log
+ cat parnew.gc.log | awk '{print $8}' | cut -d 'K' -f 2 | cut -d '>' -f 2 > HEAP_POST_GC.log
+ cat parnew.gc.log | awk '{print $1}' | cut -d ':' -f 1 > GC_TIME.log
+
+ calculateStats "MEM_PRE" HEAP_PRE_GC.log
+ calculateStats "MEM_POST" HEAP_POST_GC.log
+ calculateStatsBC "GC_FREQ" GC_TIME.log
+
+
+ # Calculate ParNew GC Cumulative total
+ cat parnew.gc.log |awk '{sum=sum+$6; print $6, sum}' > GC_DUR.log
+ # Give a count of GC occurances
+ parNewGCCount=`wc -l GC_DUR.log| awk '{print $1}'`
+
+ # Create the Heap data file
+ paste GC_TIME.log HEAP_POST_GC.log HEAP_PRE_GC.log HEAP_MAX.log > GC.Heap.data
+ # Create ParNew GC Duration data file for graphing
+ paste GC_TIME.log GC_DUR.log > GC.Dur.data
+ # Create Full GC occurrence log file for plotting
+ paste full.time.log full.dur.log > GC.Full.data
+
+ # Calculate All GC Timing and give a count of their occurrence
+ awk '{print $1}' GC_DUR.log > gc_dur.log
+ paste GC_TIME.log gc_dur.log > gc_all.log
+ cat GC.Full.data >> gc_all.log
+ sort -n gc_all.log | awk '{sum=sum+$2;print $1 , sum}' > GC.Dur.All.data
+ fullGCCount=`wc -l GC.Full.data| awk '{print $1}'`
+}
+
+processG1GCFile()
+{
+ cat gc.log | grep -e \^[0-9]*.[0-9]*\: -e \^.*[0-9]*.\-\> > $work/g1.gc.log
+
+ # Move to the working directory to further process the gathered data.
+ pushd $work &> /dev/null
+
+ # Calculate Full GC Data
+ cat g1.gc.log | sed -e :a -e '$!N;s/\n.\ *\[/ \[/;ta' -e 'P;D' > full.gc.log
+ grep Full full.gc.log |awk '{print $1}'| tr ':' ' ' > full.time.log
+ grep Full full.gc.log |awk '{print $5}' > full.dur.log
+ fullGCCount=`wc -l GC.Full.data| awk '{print $1}'`
+
+ # Create Full GC occurrence log file for plotting
+ paste full.time.log full.dur.log > GC.Full.data
+
+ # Extract the Heap and timing data.
+ # Create G1 Young Duration data file for graphing
+ grep "(young)," full.gc.log |awk '{print $1}' | tr ':' ' ' > GC_TIME.log
+ grep "(young)," full.gc.log |awk '{print $5}' > young.gc.time.log
+ # Calculate G1 young Cumulative total
+ cat young.gc.time.log |awk '{sum=sum+$1; print $1, sum}' > GC_DUR.log
+ # Create G1 Young Duration data file for graphing
+ paste GC_TIME.log GC_DUR.log > GC.Dur.data
+
+ # Give a count of GC occurrences
+ youngGCCount=`wc -l GC_DUR.log| awk '{print $1}'`
+
+ #
+ # If we have no GCs then something is wrong
+ if [ $youngGCCount == 0 ] ; then
+ echo "Error no YoungGC log entries to proceses"
+ return
+ fi
+
+ # Gather the Heap Size data
+
+ grep "(young)," full.gc.log | awk '{print $8}' | cut -d '(' -f 2 | cut -d ')' -f 1 > HEAP_MAX.Sized.log
+ grep "(young)," full.gc.log | awk '{print $8}' | cut -d '-' -f 1 > HEAP_PRE_GC.Sized.log
+ grep "(young)," full.gc.log | awk '{print $8}' | cut -d '>' -f 2| cut -d '(' -f 1 > HEAP_POST_GC.Sized.log
+
+ normaliseSizeMemFile HEAP_MAX.Sized.log HEAP_MAX.log
+ normaliseSizeMemFile HEAP_PRE_GC.Sized.log HEAP_PRE_GC.log
+ normaliseSizeMemFile HEAP_POST_GC.Sized.log HEAP_POST_GC.log
+
+
+ # Create the Heap data file
+ paste GC_TIME.log HEAP_POST_GC.log HEAP_PRE_GC.log HEAP_MAX.log > GC.Heap.data
+
+ # Calculate All GC Timing and give a count of their occurrence
+ awk '{print $1}' GC_DUR.log > gc_dur.log
+ paste GC_TIME.log gc_dur.log > gc_all.log
+ cat GC.Full.data >> gc_all.log
+ sort -n gc_all.log | awk '{sum=sum+$2;print $1 , sum}' > GC.Dur.All.data
+
+}
+
+calculateStatsBC()
+{
+label=$1
+statFile=$2
+# Calculate GC Frequencies
+ prevFreq=0
+ countFreq=0
+ gcFreqTotal=0
+ minFreq=99999999
+ maxFreq=0
+
+ echo "" > GC_FREQ.log
+ for time in `cat $statFile` ; do
+ if [ $prevFreq == 0 ] ; then
+ prevFreq=$time
+ else
+ freq=`echo $time - $prevFreq | bc`
+ echo $freq >> GC_FREQ.log
+
+ prevFreq=$time
+ gcFreqTotal=`echo $gcFreqTotal + $freq | bc`
+ countFreq=$[$countFreq + 1]
+
+ if [ `echo "$freq > $maxFreq " | bc` == 1 ] ; then
+ maxFreq=$freq
+ fi
+
+ if [ `echo "$freq < $minFreq " | bc ` == 1 ] ; then
+ minFreq=$freq
+ fi
+ fi
+ done
+
+ # Extract Min/Max/Avg
+ echo "#type:min/max/avg" > $statFile.stats
+
+ if [ $countFreq != 0 ] ; then
+ avgFreq=`echo $gcFreqTotal / $countFreq | bc -l 2&> /dev/null`
+ echo "$label:$minFreq/$maxFreq/$avgFreq" >> $statFile.stats
+ echo "Done GC Freq Stat generation for $statFile"
+ else
+ echo "$label:-/-/-" >> $statFile.stats
+ echo "Unable to calculate GC Freq stats as no freq entries found."
+ fi
+}
+
+calculateStats()
+{
+label=$1
+statFile=$2
+
+# Calculate GC Frequencies
+ count=0
+ gcTotal=0
+ min=0
+ max=0
+
+
+ for item in `cat $statFile` ; do
+ if [ $min == 0 ] ; then
+ min=$item
+ fi
+
+ gcTotal=$[$gcTotal + $item]
+ count=$[$count + 1]
+
+ if [ $item -gt $max ] ; then
+ max=$item
+ fi
+
+ if [ $item -lt $min ] ; then
+ min=$item
+ fi
+ done
+
+ # Extract Min/Max/Avg
+ echo "#type:min/max/avg" > $statFile.stats
+
+ if [ $count != 0 ] ; then
+ avg=`echo $gcTotal / $count | bc -l`
+
+ echo "$label:$min/$max/$avg" >> $statFile.stats
+ echo "Done $label Stat generation for $statFile"
+ else
+ echo "$label:-/-/-" >> $statFile.stats
+ echo "Unable to calculate $label stats as no entries found."
+ fi
+
+}
+
+#
+# Take an input file ($1) of lines
+# <value><K|M>
+# and output file $2 of <value> in whole M
+#
+normaliseSizeMemFile()
+{
+rm -f $2
+for i in `cat $1` ; do
+ if [[ `echo $i | grep -c "K" ` == 1 ]] ; then
+ kb=`echo $i|cut -d 'K' -f 1`
+ echo $[ $kb / 1024 ] >> $2
+ else
+ echo $i|cut -d 'M' -f 1 >> $2
+ fi
+done
+
+
+}
+
+
+# Parse command line
+# TODO more advanced processing
+# Check we have enough parameters
+if [ $# != 4 ] ; then
+ # Take one arg to be a graph data file.
+ if [ $# == 1 ] ; then
+ textArray[0]="" # hold text
+ length=0
+ # read whole file in loop
+ while read line
+ do
+ textArray[$length]=$line # store line
+ length=$(expr $length + 1) # increase length by 1
+ done < $1
+
+ if [ $length != 2 ] ; then
+ usage
+ fi
+
+ #Get Title and file name
+ title=${textArray[0]}
+ file=${textArray[1]}
+
+ pushd `dirname $1`
+
+ else
+ usage
+ fi
+else
+ version=$1
+ type=$2
+ volume=$3
+ brokerState=$4
+
+
+ # Configure Graph Title and image file names
+ title="$version $type : $volume% volume"
+ file="$version-$brokerState-$type-$volume"
+fi
+
+work=work
+
+mkdir -p $work
+
+echo -n "Processing GC Usage Data : "
+ParNew=`grep -c ParNew gc.log`
+
+if [ $ParNew != 0 ] ; then
+ echo "CMS log file"
+ processCMSGCFile
+ PLOT="\"GC.Dur.data\" with lines axis x1y1 ti \"ParNew GC Time ($parNewGCCount)\", "
+else
+ echo "G1 log file"
+ processG1GCFile
+ PLOT="\"GC.Dur.data\" with lines axis x1y1 ti \"G1 Young Time ($youngGCCount)\", "
+fi
+
+#
+# Collate Stats
+#
+echo -n "Colating stat Data : "
+echo "#type:min/max/avg" > $file.statistics.txt
+find .. -name "*stats" -exec grep -v "type" {} >> $file.statistics.txt \;
+
+
+# Prepare the plot command
+# If a Full GC occured during this test then plot those
+if [[ $fullGCCount > 0 ]] ; then
+PLOT=$PLOT"\"GC.Dur.data\" using 1:3 with lines axis x1y2 ti \"Cumulative Total Time(ParNew)\", \
+ \"GC.Dur.All.data\" with lines axis x1y2 ti \"Cumulative Total Time(All)\", \
+ \"GC.Full.data\" with points ti \"Full GCs Time ($fullGCCount)\" "
+else
+PLOT=$PLOT"\"GC.Dur.data\" using 1:3 with lines axis x1y2 ti \"Cumulative Total Time(ParNew)\", \
+ \"GC.Dur.All.data\" with lines axis x1y2 ti \"Cumulative Total Time(All)\" "
+fi
+
+if [ $ParNew != 0 ] ; then
+ gcs=$parNewGCCount
+else
+ gcs=$youngGCCount
+fi
+
+# Call out to gnuplot to generate graphs
+# Only do this if we have data
+if [ $gcs != 0 ] ; then
+ # Generate the Heap Graph and the GC Duration Graph
+ gnuplot << EOGNUPLOT
+set xlabel "Run Time(s)"
+
+set title "$title : Heap Size"
+set term png
+set output "$file-Heap.png"
+set ylabel "MB" +0.5,0
+plot "GC.Heap.data" using 1:2 with lines axis x1y1 ti "GC Size Post",\
+ "GC.Heap.data" using 1:4 with lines axis x1y1 ti "GC Size Max ", \
+ "GC.Heap.data" using 1:3 with lines axis x1y1 ti "GC Size Pre "
+
+set y2tics nomirror
+set ytics nomirror
+set key top left
+set title "$title GC Time"
+set ylabel "Time(s)" +0.5,0
+set y2label "Total Time(s)"
+set output "$file-GCDuration.png"
+plot $PLOT
+EOGNUPLOT
+
+else
+ echo "Warning: No GC Data to graph"
+fi
+
+# pop back to further process for CPU usage
+popd &> /dev/null
+
+echo "Processing CPU Usage Data"
+
+# CPU.data is just TimeStamp + %age
+cat broker_cpu.log |awk '{print $1 "T" $2 " " $3}' > $work/CPU.data
+
+# Move to work directory to run gnuplot
+pushd $work &> /dev/null
+
+# Call out to gnuplot to generate graphs
+# Generate the Heap Graph and the GC Duration Graph
+gnuplot << EOGNUPLOT
+set term png
+set title "$title : CPU"
+set output "$file-CPU.png"
+unset key
+set xlabel "Real Time(h:m)"
+set ylabel "Usage(%)"
+
+set xdata time
+set timefmt "%Y-%m-%dT%H:%M:%S"
+set format x "%H:%M"
+plot "CPU.data" using 1:2 with lines
+EOGNUPLOT
+
+#"
+# Pop out of $work directory.
+#
+popd &> /dev/null \ No newline at end of file
diff --git a/qpid/java/perftests/bin/processing/processAll.sh b/qpid/java/perftests/bin/processing/processAll.sh
new file mode 100755
index 0000000000..895d59c316
--- /dev/null
+++ b/qpid/java/perftests/bin/processing/processAll.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+usage()
+{
+ echo "processResults.sh <search dir>"
+ exit 1
+}
+
+root=`pwd`
+echo $root
+graphFile=graph.data
+
+if [ $# != 1 ] ; then
+ usage
+fi
+
+mkdir -p results
+
+for file in `find $1 -name $graphFile` ; do
+
+ dir=`dirname $file`
+
+ echo Processing : $dir
+ pushd $dir &> /dev/null
+
+ $root/process.sh $graphFile
+
+ echo Copying Images
+ cp work/*png $root/results/
+
+ echo Copying Stats
+ cp work/*.statistics.txt $root/results/
+
+
+ popd &> /dev/null
+ echo Done
+done \ No newline at end of file
diff --git a/qpid/java/perftests/bin/processing/processTests.py b/qpid/java/perftests/bin/processing/processTests.py
new file mode 100755
index 0000000000..8db44eda79
--- /dev/null
+++ b/qpid/java/perftests/bin/processing/processTests.py
@@ -0,0 +1,850 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+import datetime
+import sys
+import string
+from optparse import OptionParser
+from datetime import datetime, timedelta
+import shutil
+
+
+def showUsage():
+ log("./processTests.py [-b|--broker-log-dir] <dir> [-t|--test-dir] <dir>")
+
+
+ACCESS="Access"
+MODIFY="Modify"
+
+BROKER_LOG="broker.log"
+BROKER_PID="broker.pid"
+BROKER_CPU="broker_cpu.log"
+BROKER_CPU_DATED="broker_cpu.log.dated"
+BROKER_STATS="broker.stats"
+BROKER_GC="gc.log"
+
+GRAPH_DATA="graph.data"
+
+_verbose = False
+_debug = False
+_brokerLogs = ""
+
+def exitError(message):
+ log(message)
+ sys.exit(1)
+
+def main():
+ global _log, _verbose, _debug, _brokerLogs
+
+ # Parse the command line options
+ parser = OptionParser()
+
+ parser.add_option("-v", "--verbose", dest="verbose",
+ action="store_true", default=False, help="enable verbose output")
+
+ parser.add_option("-d", "--debug", dest="debug",
+ action="store_true", default=False, help="enable debug output")
+
+ parser.add_option("-b", "--broker-log-dir", dest="brokerLogs",
+ action="store", default=True, help="Broker Logs")
+
+ parser.add_option("-t", "--test-dir", dest="testDir",
+ action="store", default="", help="Test Results")
+
+
+ (options, args) = parser.parse_args()
+
+ _verbose = options.verbose
+ _debug = options.debug
+ testDir = options.testDir
+ _brokerLogs = options.brokerLogs
+
+ if testDir == "" or _brokerLogs == "" :
+ log("Broker Log Dir and Test Dir are both requried.")
+ showUsage()
+
+ if not os.path.exists(testDir):
+ exitError("Test directory does not exist:" + testDir)
+
+ if not os.path.exists(_brokerLogs):
+ exitError("Broker log directory does not exist:" + _brokerLogs)
+
+
+ # Standardize the format of the broker logs
+ preProcessBrokerLogs(_brokerLogs)
+
+ # Get list of test results from test_dir
+ processTestResults(testDir)
+
+#
+# Process the log files we know of
+#
+def preProcessBrokerLogs(resultDir):
+
+ print "Pre Processing Broker Logs"
+ # Pre-Process GC - no pre processing required
+
+ # Process Log4j - no processing required as file is already time stamped.
+
+ # Pre-Process broker_cpu
+ processCPUUsage(resultDir)
+
+#
+# Process the broker CPU log file and create an output file of format
+# <Date Time> <CPU Usage>
+#
+#
+def processCPUUsage(resultDir):
+ logfile=resultDir+os.sep+BROKER_CPU
+ datedFile=resultDir+os.sep+BROKER_CPU_DATED
+
+ start = extractTime(ACCESS, logfile+".stat")
+
+ pid = getPID(BROKER_PID)
+
+ topRate = getFirstLine(_brokerLogs+os.sep+"top.rate")
+
+ #
+ # Calculate addition required per process line output
+ #
+ if topRate.find(".") == -1:
+ seconds = topRate
+ millis = 0
+ else:
+ split = topRate.split('.')
+ seconds = split[0]
+ # Convert
+ millis = float("0."+split[1]) * 1000
+
+ offset = timedelta(seconds=int(seconds),milliseconds=int(millis))
+
+ #
+ # Process the CPU log file and make a file of format:
+ # datetime <CPU% usage> <MEM% usage>
+ #
+ # Open log CPU file for reading
+ logfile = open(logfile, "r")
+
+ # Open the output file, erasing any existing version
+ # Keep track of the min/max, sum and entries.
+ minCPU=float(sys.maxint)
+ maxCPU=0.0
+ minMem=float(sys.maxint)
+ maxMem=0.0
+
+ entries=0
+ sumCPU=0.0
+ sumMem=0.0
+
+ output= open(datedFile, "w")
+ for line in logfile:
+ #
+ # Data format
+ # 0 1 2 3 4 5 6 7 8 9 10 11
+ # PID USER PR NI %CPU TIME+ %MEM VIRT RES SHR S COMMAND
+ # PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ #
+ # %CPU and %MEM positions vary, probably based on os/version of top.
+ # lets auto-detect where it is.
+ #
+ # Index is 0 based for array usage.
+ index = 0
+ if line.find("PID") != -1:
+ for key in line.split(" "):
+ strippedKey = key.lstrip()
+ if len(strippedKey) > 0:
+ # Record the key index
+ if (strippedKey == "%CPU"):
+ cpuIndex=index
+ if (strippedKey == "%MEM"):
+ memIndex=index
+ # Increase count for next key
+ index = index + 1
+
+
+ # Find lines that contain our broker process
+ if line.find("QPBRKR") != -1:
+
+ # Split line on whitespace
+ data = line.split()
+
+ #Write out the date time (ISO-8601 format)
+ output.write(str(start))
+ # Output the %CPU value
+ output.write(" "+str(data[cpuIndex]))
+ # Output the %MEM value
+ output.write(" "+str(data[memIndex]))
+ output.write('\n')
+
+ # Add the offset based on the logging rate
+ start = start + offset
+
+ # Record entries
+ entries = entries + 1
+
+ # Record Metrics
+ # Record CPU data
+ cpu = float(data[cpuIndex])
+ if (cpu < minCPU):
+ minCPU = cpu
+
+ if (cpu > maxCPU):
+ maxCPU = cpu
+
+ sumCPU = sumCPU + cpu
+
+ # Record Mem data
+ mem = float(data[memIndex])
+
+ if (mem < minMem):
+ minMem = mem
+
+ if (mem > maxMem):
+ maxMem = mem
+
+ sumMem = sumMem + mem
+
+
+ #end for
+
+ # Close the files
+ logfile.close
+ output.close
+
+ # Output stats file
+ statFile=resultDir+os.sep+BROKER_CPU+".stats"
+ output= open(statFile, "w")
+ output.write("#type:min/max/avg")
+ output.write('\n')
+ output.write("CPU:"+str(minCPU)+"/"+str(maxCPU)+"/"+str(float(sumCPU)/float(entries)))
+ output.write('\n')
+ output.write("MEM:"+str(minMem)+"/"+str(maxMem)+"/"+str(float(sumMem)/float(entries)))
+ output.write('\n')
+ output.close
+
+ log("Pre Process of CPU Log file '"+BROKER_CPU+"' complete")
+
+
+#
+# Given a known process type, get the recorded PID.
+#
+def getPID(process):
+ return getFirstLine(_brokerLogs+os.sep+process)
+
+#
+# Get the first line of the file without EOL chars.
+# NOTE: this will load the entire file into memory to do it.
+#
+def getFirstLine(fileName):
+ f = open(fileName,"r")
+ line = f.read().splitlines()[0]
+ f.close
+ return line
+
+
+#
+# Walk the directory given and process all csv test results
+#
+def processTestResults(resultDir):
+ for root, dirs, files in os.walk(resultDir, topdown=False):
+ if len(files) == 0:
+ exitError("Test result directory is empty:" + resultDir)
+ for file in files:
+ if file.endswith(".csv"):
+ processTestResult(root , file)
+
+def processTestResult(root, resultFile):
+ # Open stat file and extract test times, we determine:
+ # -start time based on the 'Access' value
+ # -end time based on the 'Modify' value 'Change' would also work
+
+ statFile=root+os.sep+resultFile+".stat"
+
+ if not os.path.exists(statFile):
+ log("Unable to process : Unable to open stat file:" + statFile)
+ return
+
+
+ createResultSetPackage(root, resultFile)
+
+
+def extractTime(field, statFile):
+ stats = open(statFile, "r")
+ for line in stats:
+ if line.startswith(field):
+ if line.find("(") == -1:
+ dt = lineToDate(" ".join(line.split()[1:]))
+
+ #
+ # TODO We need to handle time zone issues as I'm sure we will have issues with the
+ # log4j matching.
+
+ stats.close
+ return dt
+
+#
+# Given a text line in ISO format convert it to a date object
+#
def lineToDate(line):
    """Convert a line beginning with an ISO-format timestamp to a datetime.

    Accepts '2009-06-22 17:04:44,320' and '2009-06-22 17:04:44.320'
    (fractional seconds are optional).  Returns None when the line does not
    start with a timestamp — the original returned the undefined name
    'null' here, which raised NameError.
    """
    #2009-06-22 17:04:44,320
    #2009-06-22 17:04:44.320
    pattern = re.compile(r'(?P<year>^[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9])-(?P<day>[0-9][0-9]) (?P<hour>[0-9][0-9]):(?P<minute>[0-9][0-9]):(?P<seconds>[0-9][0-9])')


    m = pattern.match(line)
    if m:
        year = int(m.group('year'))
        month = int(m.group('month'))
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        minute = int(m.group('minute'))
        seconds = int(m.group('seconds'))

        # Second pass: also capture the fractional seconds (log4j uses ',',
        # our generated CPU log uses '.') when present.
        pattern = re.compile(r'(?P<year>^[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9])-(?P<day>[0-9][0-9]) (?P<hour>[0-9][0-9]):(?P<minute>[0-9][0-9]):(?P<seconds>[0-9][0-9])[.|,](?P<micro>[0-9]+)')
        m = pattern.match(line)
        micro = None
        if m:
            micro = m.group('micro')

        if micro == None:
            micro = 0

        # Correct issue where micros are actually nanos
        if int(micro) > 999999:
            micro = int(micro) / 1000

        return datetime(year,month,day,hour,minute,seconds,int(micro))
    else:
        # No timestamp at the start of the line.
        return None
+
def createResultSetPackage(root, resultFile):
    """Create a per-test result directory and populate it.

    Moves the csv and .stat files into a directory named after the test,
    slices the broker logs down to the test's time window, and generates
    graph and stat data for it.
    """
    # Get the Name of the test to make a directory with said name
    testName = resultFile.split(".csv")[0]
    resultDir = root+ os.sep + testName

    log("Processing Result set for:"+ testName)

    mkdir(resultDir)

    # Move result file to new directory
    shutil.move(root + os.sep + resultFile, resultDir)

    # Move stat file to new directory
    shutil.move(root + os.sep + resultFile + ".stat", resultDir)

    statFile=resultDir + os.sep + resultFile + ".stat"

    #
    # Get start and end time for test run
    #
    start = extractTime(ACCESS, statFile)
    end = extractTime(MODIFY, statFile)

    sliceBrokerLogs(resultDir, start, end)
    createGraphData(resultDir, testName)
    createTestStatData(resultDir, testName)

    log("Created Result Package for:"+ testName)
+
def sliceBrokerLogs(resultDir, start, end):
    """Slice each broker log (CPU, log4j, GC) down to the [start, end] test window."""
    sliceCPULog(resultDir, start, end)
    sliceLog4j(resultDir, start, end)
    sliceGCLog(resultDir, start, end)
+
+
def sliceCPULog(resultDir, start, end):
    """Copy BROKER_CPU_DATED samples inside [start, end] to resultDir and write stats.

    Produces:
      <resultDir>/<BROKER_CPU>        - the sliced log lines
      <resultDir>/<BROKER_CPU>.stats  - min/max/avg for CPU and MEM usage
    """
    global _brokerLogs
    logfilePath = _brokerLogs + os.sep + BROKER_CPU_DATED
    cpuSliceFile = resultDir + os.sep + BROKER_CPU

    # Keep track of the min/max, sum and number of entries.
    minCPU = float(sys.maxint)
    maxCPU = 0.0
    minMem = float(sys.maxint)
    maxMem = 0.0

    entries = 0
    sumCPU = 0.0
    sumMem = 0.0

    # Process the CPU log file and make a file of format:
    # datetime <CPU% usage> <MEM% usage>
    logFile = open(logfilePath, "r")

    # Open the slice output file, erasing any existing version.
    cpuslice = open(cpuSliceFile, "w")

    for line in logFile:
        data = line.split()

        #
        # //fixme remove tz addition.
        #
        lineTime = lineToDate(" ".join(data[0:2]) + " +0000")

        # Only keep samples that fall inside the test window.
        if lineTime > start and lineTime < end:
            # Write the data through to the new file
            cpuslice.writelines(line)

            #
            # Data format is
            # <Date> <Time> <%CPU> <%MEM>
            # 2010-02-19 10:16:17 157 28.1
            #
            cpuIndex = 2
            memIndex = 3

            # Record entries
            entries = entries + 1

            # Record CPU data
            cpu = float(data[cpuIndex])
            if cpu < minCPU:
                minCPU = cpu
            if cpu > maxCPU:
                maxCPU = cpu
            sumCPU = sumCPU + cpu

            # Record Mem data
            mem = float(data[memIndex])
            if mem < minMem:
                minMem = mem
            if mem > maxMem:
                maxMem = mem
            sumMem = sumMem + mem

    logFile.close()
    cpuslice.close()
    log("Sliced CPU log")

    # Guard against an empty slice; the original divided by zero here.
    if entries == 0:
        log("No CPU samples found in test window; skipping stats for " + cpuSliceFile)
        return

    # Output stats file
    statFile = cpuSliceFile + ".stats"
    output = open(statFile, "w")
    output.write("#type:min/max/avg")
    output.write('\n')
    output.write("CPU:" + str(minCPU) + "/" + str(maxCPU) + "/" + str(float(sumCPU) / float(entries)))
    output.write('\n')
    output.write("MEM:" + str(minMem) + "/" + str(maxMem) + "/" + str(float(sumMem) / float(entries)))
    output.write('\n')
    # The original 'output.close' (no parentheses) never closed the file.
    output.close()

    log("Generated stat data from CPU Log file")
+
+
def sliceGCLog(resultDir, start, end):
    """Copy BROKER_GC log lines inside [start, end] to resultDir and write GC stats.

    GC log lines are stamped with seconds.millis offsets from JVM start, so the
    wall-clock time of each line is reconstructed from the log's 'Access' stat
    time plus that offset.
    """
    global _brokerLogs
    logfilePath = _brokerLogs + os.sep + BROKER_GC
    sliceFile = resultDir + os.sep + BROKER_GC

    # Wall-clock time the GC log was started.
    gcstart = extractTime(ACCESS, logfilePath + ".stat")

    # Keep track of the min/max, sum and number of entries.
    minGCDuration = float(sys.maxint)
    maxGCDuration = 0.0
    sumGCDuration = 0.0
    entriesGCDuration = 0

    # Open log GC file for reading
    logFile = open(logfilePath, "r")

    # Open the output file, erasing any existing version
    output = open(sliceFile, "w")

    # Use a regular expression to pull out the Seconds.Millis values from the
    # start of the gc log line.
    pattern = re.compile(r'(?P<seconds>^[0-9]+)\.(?P<millis>[0-9]+):')

    for line in logFile:
        m = pattern.match(line)

        if m:
            seconds = m.group('seconds')
            millis = m.group('millis')

            offset = timedelta(seconds=int(seconds), milliseconds=int(millis))
            lineTime = gcstart + offset

            if lineTime > start and lineTime < end:
                output.writelines(line)

                # Process GC Duration lines from the ParNew collector; ensure
                # 'CMS' is absent as that means the line has been corrupted by
                # interleaved concurrent-mark-sweep output.
                # (The original used bitwise '&' here, which only gave the
                # intended result by a precedence accident; 'and' states it
                # directly.)
                if line.find("ParNew") != -1 and line.find("CMS") == -1:
                    #
                    # Example data line
                    # 7.646: [GC 7.646: [ParNew: 14778K->461K(14784K), 0.0026610 secs] 49879K->36609K(73288K), 0.0027560 secs] [Times: user=0.01 sys=0.00, real=0.01 secs]
                    #
                    # So entry 8 is the whole GC cycle time. 15 entries total.
                    data = line.split()

                    # Check we have a valid ParNew line
                    if len(data) == 15:
                        # Record GC Duration data
                        entriesGCDuration = entriesGCDuration + 1
                        gcTime = float(data[8])

                        if gcTime < minGCDuration:
                            minGCDuration = gcTime
                        if gcTime > maxGCDuration:
                            maxGCDuration = gcTime
                        sumGCDuration = sumGCDuration + gcTime

    # Close the files ('logFile.close' without parentheses was a no-op).
    logFile.close()
    output.close()

    log("Sliced gc log")

    # Only provide GC duration stats if any ParNew entries were processed;
    # the original divided by zero when entriesGCDuration was 0.
    if entriesGCDuration == 0:
        log("No ParNew GC entries found in test window; skipping stats for " + sliceFile)
        return

    # Output stats file
    statFile = sliceFile + ".stats"
    output = open(statFile, "w")
    output.write("#type:min/max/avg")
    output.write('\n')
    output.write("GC_DUR:%.14f/%.14f/%.14f" % (minGCDuration, maxGCDuration, (sumGCDuration / float(entriesGCDuration))))
    output.write('\n')
    output.close()

    # (Message corrected: this is the GC log, not the CPU log.)
    log("Generated stat data from GC Log file")
+
+
def sliceLog4j(resultDir, start, end):
    """Copy BROKER_LOG (log4j) lines inside [start, end] to resultDir.

    Lines without a leading timestamp (e.g. stack traces) inherit the time of
    the previous timestamped line so they stay with their log entry.
    """
    global _brokerLogs
    logfilePath = _brokerLogs + os.sep + BROKER_LOG
    log4jSliceFile = resultDir + os.sep + BROKER_LOG

    # Wall-clock time the broker log was started.
    log4jstart = extractTime(ACCESS, logfilePath + ".stat")

    #
    # Say that first line is the start of the file,
    # This value will give a time value to the initial
    # logging before Log4j kicks in.
    #
    lineTime = log4jstart

    # Process the broker log4j file
    logFile = open(logfilePath, "r")

    #
    # Create outputfile
    #
    log4jslice = open(log4jSliceFile, "w")

    for line in logFile:
        data = line.split()

        #
        # If the line has a time at the start then process it
        # otherwise use the previous time. This means if there is
        # a stack trace in the middle of the log file then it will
        # be copied over to the split file as long as it is in the
        # split time.
        #
        if hasTime(data):
            #
            # //fixme remove tz addition.
            #
            lineTime = lineToDate(" ".join(data[0:2]) + " +0000")

        if lineTime > start and lineTime < end:
            # (Removed a leftover debug 'print line' that echoed every
            # in-window line to stdout.)
            log4jslice.writelines(line)

    logFile.close()
    log4jslice.close()
    log("Sliced broker log")
+
+
+#
+# Check the first two entries of data can make a datetime object
+#
def hasTime(data):
    """Return truthy when the first two whitespace-split fields of a log line
    form an ISO date and a time.

    Examples matched:
      2009-06-22 17:04:44,246
      2009-06-22 17:04:44.2464
      2009-06-22 17:04:44

    Robustness fix: lines with fewer than two fields (e.g. blank lines) now
    return False instead of raising IndexError.
    """
    if len(data) < 2:
        return False

    date = data[0]
    time = data[1]

    # ISO-8601 '-' format date
    dateRE = re.compile('[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]')

    #
    # Check for times with/out millis
    # e.g.
    # 10:00:00,000 - log4j
    # 10:00:00.0000 - generated in script for cpu time
    #
    timeRE = re.compile('[0-9][0-9]:[0-9][0-9]:[0-9][0-9]?[0-9]*')

    return dateRE.match(date) and timeRE.match(time)
+
+def createGraphData(resultDir, testName):
+ # Create graph.data file for process.sh
+ # Format two lines : Title and filename
+ # $version $type : $volume% volume
+ # $version-$brokerState-$type-$volume
+ version=getBrokerVersion()
+
+ test= extractTestValue("n",resultDir, testName)
+ volume = int(float(extractTestResult("Test * Size Throughput", resultDir, testName)) * 1000)
+ messageSize = extractTestValue("messageSize",resultDir, testName)
+ ackMode = ackModeToString(extractTestValue("consAckMode",resultDir, testName))
+
+ graphDataFile=resultDir+os.sep+GRAPH_DATA
+
+ graphData = open(graphDataFile, "w")
+
+ #
+ # Write Title
+ graphData.write(version+":"+test+":"+str(messageSize)+"kb x "+str(volume)+" msg/sec using "+ackMode)
+ graphData.write('\n')
+
+ #
+ # Write FileName
+ graphData.writelines(version+"-"+testName)
+ graphData.write('\n')
+ graphData.close
+ log("Created graph.data")
+
+
def getBrokerVersion():
    """Extract the broker version from the 'Qpid Broker Ready' line of BROKER_LOG.

    Log entry example:
    2009-06-19 17:04:02,493 INFO [main] server.Main (Main.java:456) - Qpid Broker Ready :2.3.0.1 build: 727403M
    """
    global _brokerLogs
    READY = "Qpid Broker Ready"
    brokerLogFile = _brokerLogs + os.sep + BROKER_LOG

    # Fixes: the file handle used to be named 'log', shadowing the
    # module-level log() helper, and it was never closed.
    dataLine = ""
    with open(brokerLogFile, "r") as brokerLog:
        for line in brokerLog:
            if line.find(READY) != -1:
                dataLine = line
                break

    # Split on READY; data[1] should be ' :2.3.0.1 build: 727403M'.
    # NOTE(review): raises IndexError if the Ready line is absent, as before.
    data = dataLine.split(READY)

    readyEntries = data[1].split()

    # Splitting on whitespace gives ':version'; a quick split on ':'
    # then yields the version itself.
    version = readyEntries[0].split(':')[1]

    # Strip to ensure we have no whitespace
    return version.strip()
+
+
def extractTestValue(property,resultDir,testName):
    """Extract a test parameter (written as 'name = value') from the csv summary line."""
    return extractTestData(property,resultDir,testName," =")
+
def extractTestResult(property,resultDir,testName):
    """Extract a test result (written as 'name: value') from the csv summary line."""
    return extractTestData(property,resultDir,testName,":")
+
def extractTestData(property,resultDir,testName,type):
    """Return the value following 'property<type>' on the 'Total Tests:'
    summary line of <resultDir>/<testName>.csv, or '' when not present.

    'type' is the separator the property was written with (' =' for
    parameters, ':' for results).
    """
    csvPath = resultDir + os.sep + testName + ".csv"

    # Find the summary line; if it occurs more than once, the last wins.
    summaryLine = ""
    csvFile = open(csvPath, "r")
    for row in csvFile:
        if row.find("Total Tests:") == 0:
            summaryLine = row
    csvFile.close()

    # The summary line is comma separated:
    #   ... , <property><type>, <value>, ...
    fields = summaryLine.split(',')

    target = property + type
    value = ""
    matched = False

    for field in fields:
        if matched:
            # The entry following the matched property is its value.
            value = field
            break
        if field.strip() == target:
            matched = True

    return value.strip()
+
def createTestStatData(resultDir, testName):
    """Compute min/max/avg latency (and throughput where available) from a
    test csv file and write them to <resultDir>/<testName>.stats."""
    csvFilePath = resultDir + os.sep + testName + ".csv"

    # Keep track of the min/max, sum and number of entries.
    minLatency = float(sys.maxint)
    maxLatency = 0.0
    minThroughput = float(sys.maxint)
    maxThroughput = 0.0

    entries = 0
    sumLatency = 0.0
    sumThroughput = 0.0

    # Column indices depend on which test class produced the line; -1 means
    # "not yet known" / "not present".  (These were previously unbound until
    # the first recognised class name, risking NameError on malformed files.)
    LatencyIndex = -1
    ThroughputIndex = -1

    #
    # Open csv File
    #
    csvFile = open(csvFilePath, "r")
    for line in csvFile:

        # The PingAsyncTestPerf test class outputs the latency and throughput data.
        if line.find("PingAsyncTestPerf") != -1:
            #
            # Data format is
            # <Test> <TestName> <Thread> <Status> <Time> <Latency> <Concurrency> <Thread> <TestSize>
            #org.apache.qpid.ping.PingAsyncTestPerf, testAsyncPingOk, Dispatcher-Channel-1, Pass, 209.074, 219.706, 0, 1, 10
            LatencyIndex = 5
            ThroughputIndex = 4

        # The PingLatencyTestPerf test class just outputs the latency data.
        if line.find("PingLatencyTestPerf") != -1:
            #
            # Data format is
            # <Test> <TestName> <Thread> <Status> <Time> <Latency> <Concurrency> <Thread> <TestSize>
            # org.apache.qpid.ping.PingLatencyTestPerf, testPingLatency, Dispatcher-Channel-1, Pass, 397.05502, 0, 2, 1000
            LatencyIndex = 4
            ThroughputIndex = -1

        # Only process the test lines that have 'org.apache.qpid.ping',
        # i.e. skip header and footer, and only once the layout is known.
        if line.find("org.apache.qpid.ping") != -1 and LatencyIndex != -1:
            # Perform stat processing for the min/max/avg
            data = line.split(",")

            # Record entries
            entries = entries + 1

            # Record Latency data
            latency = float(data[LatencyIndex])
            if latency < minLatency:
                minLatency = latency
            if latency > maxLatency:
                maxLatency = latency
            sumLatency = sumLatency + latency

            if ThroughputIndex != -1:
                # Record Throughput data
                throughput = float(data[ThroughputIndex])
                if throughput < minThroughput:
                    minThroughput = throughput
                if throughput > maxThroughput:
                    maxThroughput = throughput
                sumThroughput = sumThroughput + throughput

    csvFile.close()

    # Nothing to report; the original divided by zero here.
    if entries == 0:
        log("No test data lines found in " + csvFilePath + "; skipping stats")
        return

    # Output stats file
    statFile = resultDir + os.sep + testName + ".stats"
    output = open(statFile, "w")
    output.write("#type:min/max/avg")
    output.write('\n')
    output.write("LATENCY:" + str(minLatency) + "/" + str(maxLatency) + "/" + str(float(sumLatency) / float(entries)))
    output.write('\n')

    if ThroughputIndex != -1:
        # Output msgs/sec based on the time taken for a batch of msgs; note
        # the inversion: the smallest batch time gives the highest throughput.
        output.write("THROUGHPUT:" + str(float(1000) / maxThroughput) + "/" + str(float(1000) / minThroughput) + "/" + str(float(1000) / (float(sumThroughput) / float(entries))))
        output.write('\n')

    # The original 'output.close' (no parentheses) never closed the file.
    output.close()

    log("Generated stat data from test " + testName + " CSV file")
+
+
def ackModeToString(ackMode):
    """Map a JMS acknowledge-mode code (as a string) to a readable name.

    Unknown codes are returned as their string representation.
    """
    names = {
        '0': "Transacted",
        '1': "AutoAck",
        '2': "ClientAck",
        '3': "DupsOK",
        '257': "NoAck",
        '258': "PreAck",
    }
    return names.get(ackMode, str(ackMode))
+
+
+
def debug(msg):
    """Log msg only when the global _debug flag is set."""
    global _debug
    if _debug:
        log(msg)
+
def log(msg):
    """Print a progress message to stdout.

    Uses the parenthesised form, which behaves identically for a single
    argument under both Python 2 and Python 3 (the bare 'print msg'
    statement is a syntax error under Python 3).
    """
    print(msg)
+
def mkdir(dir):
    """Create directory 'dir' unless it already exists."""
    if os.path.exists(dir):
        return
    os.mkdir(dir)
+
if __name__ == "__main__":
    # Script entry point; main() is defined earlier in this file.
    main()
diff --git a/qpid/java/perftests/bin/run_many.sh b/qpid/java/perftests/bin/run_many.sh
new file mode 100755
index 0000000000..cca2ffec21
--- /dev/null
+++ b/qpid/java/perftests/bin/run_many.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
# args:
#   <number of processes to start>
#   <name of run>
#   <command to run>
#
# Starts $1 copies of $3 in the background. stdout of copy i goes to
# $2.i.out, stderr is appended to $2.err, and the PID is written to
# $2.i.pid for later cleanup.

for i in $(seq 1 "$1"); do
  # $3 is intentionally unquoted so a command with arguments word-splits.
  $3 >"$2.$i.out" 2>>"$2.err" &
  echo $! > "$2.$i.pid"
done
diff --git a/qpid/java/perftests/bin/topicListener.sh b/qpid/java/perftests/bin/topicListener.sh
new file mode 100755
index 0000000000..3a925910ad
--- /dev/null
+++ b/qpid/java/perftests/bin/topicListener.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
# XXX -Xmx512m -Xms512m -XX:NewSize=150m
QPID_LIBS=$QPID_HOME/lib/qpid-all.jar

# Set other variables used by the qpid-run script before calling
export JAVA=java \
    JAVA_VM=-server \
    JAVA_MEM="-Xmx128m -Dlog4j.configuration=$HOME/log4j.properties" \
    JAVA_GC="-XX:-UseConcMarkSweepGC -XX:+HeapDumpOnOutOfMemoryError" \
    QPID_CLASSPATH=$QPID_LIBS

# "$@" preserves each script argument as a separate word; the original's
# unquoted $* re-split arguments containing spaces.
. qpid-run -Damqj.logging.level="INFO" org.apache.qpid.oldtopic.Listener "$@"
diff --git a/qpid/java/perftests/bin/topicPublisher.sh b/qpid/java/perftests/bin/topicPublisher.sh
new file mode 100755
index 0000000000..e4e9981a75
--- /dev/null
+++ b/qpid/java/perftests/bin/topicPublisher.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# XXX -Xmx512m -Xms512m -XX:NewSize=150m
QPID_LIBS=$QPID_HOME/lib/qpid-all.jar

# Set other variables used by the qpid-run script before calling
export JAVA=java \
    JAVA_VM=-server \
    JAVA_MEM=-Xmx128m \
    JAVA_GC="-XX:-UseConcMarkSweepGC -XX:+HeapDumpOnOutOfMemoryError" \
    QPID_CLASSPATH=$QPID_LIBS

# "$@" preserves each script argument as a separate word; the original's
# unquoted $* re-split arguments containing spaces.
. qpid-run -Damqj.logging.level="INFO" org.apache.qpid.oldtopic.Publisher "$@"