#!/bin/bash

# Make sure that CROHMELibDir and LgEvalDir are defined in
# your shell environment, e.g. by including:
#	
#	export LgEvalDir=<path_to_LgEval>
#	export CROHMELibDir=<path_to_CROHMELib>       		
#	export PATH=$PATH:$CROHMELibDir/bin:$LgEvalDir/bin
# 
# in your .bashrc file (the initialization file for the bash shell). The PATH
# alteration will add the tools to your search path. 

if [ $# -lt 2 ]
then
	echo "LgEval Label graph evaluation tool"
	echo "Copyright (c) R. Zanibbi, H. Mouchere, 2012-2013"
	echo ""
	echo "Usage: evaluate outputDir groundTruthDir [d/s/b/t(default)]"
	echo ""
	echo "WARNING: this script will remove Results<outputDir> from the"
	echo "current directory to avoid confusion. Make sure to save previous"
	echo "evaluation results for a given directory if you wish to keep them."
	echo ""
	echo "Evaluates all label graph (.lg) files in outputDir against"
	echo "corresponding files in groundTruthDir. groundTruthDir is used"
	echo "to generate the list of files to be compared (i.e. if a file is"
	echo "not in the ground truth directory, it will not be considered)."
	echo ""
	echo "Outputs"
	echo "-----------------------------"
	echo " Results<outputDir>/"
	echo "    Summary : summary of performance metrics"
	echo "    Correct : list outputDir files matching ground truth"
	echo "    Metrics.m : metrics for all .lg files compared"
	echo "    Diffs.diff : all differences between files"
	echo "    ConfusionMatrix.html : node and edge label confusion matrix"
	echo "        (errors only)"
	echo "" 
	echo "    Metrics/ : directory with .m (metric) and .diff (difference) file for"
	echo "      each comparison, along with .dot (GraphViz) and .pdf files for"
	echo "      viewing differences between files."
	echo ""
	echo "NOTE: By default differences are visualized in .pdf files located in Metrics/"
	echo "      using symbol layout trees; a third argument may be provided to"
	echo "      produce other visualizations of structural differences"
	echo "      (see lg2dot for details)."
	exit 0
fi
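
# Example invocation (directory names below are illustrative):
#
#	evaluate myOutput groundTruth
#	evaluate myOutput groundTruth d   # third argument is passed through to lg2dot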

dir=$1
BNAME=`basename $1`
truthDir=$2
ResultsDir=Results_$BNAME

# Remove existing evaluation directory, and create a new one.
rm -fr $ResultsDir 
mkdir $ResultsDir
mkdir $ResultsDir/Metrics
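
# $ResultsDir holds the summary outputs; $ResultsDir/Metrics holds the
# per-file .m, .diff, .dot, and .pdf files produced below.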

echo "Output Files: $1"
echo "Ground Truth: $2"
echo ""

# Compute all .m metrics outputs (per-file), and .diff results (per-file).
echo "Evaluating files..."
PREFIX=res_
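# NOTE: PREFIX is defined here but not used below.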
for file in $truthDir/*.lg
do
	FNAME=`basename $file .lg`
	nextFile=$dir/$FNAME.lg
	echo "  >> Comparing $FNAME.lg"

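	# evallg.py compares a system output .lg file against its ground truth file:
	# "m" mode produces per-file metrics, "diff" mode produces the label
	# differences (empty output indicates an exact match with the ground truth).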
	python $LgEvalDir/src/evallg.py $nextFile $file m > $ResultsDir/Metrics/$FNAME.m
	DIFF=`python $LgEvalDir/src/evallg.py $nextFile $file diff`
	if [ -n "$DIFF" ]
	then
		echo "$DIFF" > $ResultsDir/Metrics/$FNAME.diff 

		# If a third argument is provided, generate a .pdf file to visualize
		# differences between graphs.
		if [ $# -gt 2 ]
		then
			lg2dot $nextFile $file $3 
			mv $FNAME.dot $FNAME.pdf $ResultsDir/Metrics/
		else
			lg2dot $nextFile $file t
			mv $FNAME.dot $FNAME.pdf $ResultsDir/Metrics/
		fi
	else
		echo "$nextFile" >> $ResultsDir/Correct
	fi
done

# Compile all metrics/diffs,
# and then compute metric summaries and confusion matrices.
cat $ResultsDir/Metrics/*.m > $ResultsDir/Metrics.m
ALLDIFFS=`ls $ResultsDir/Metrics | grep '\.diff$'`
if [ -n "$ALLDIFFS" ]
then
	cat $ResultsDir/Metrics/*.diff > $ResultsDir/Diffs.diff
else
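	# No .diff files were produced: every file matched its ground truth.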
	touch $ResultsDir/__NoErrors
	touch $ResultsDir/Diffs.diff  # empty - no errors.
fi

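# Produce the overall summary (__Summary) and the node/edge label confusion
# matrix (ConfusionMatrix.html) from the compiled metrics and differences.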
python $LgEvalDir/src/sumMetric.py $ResultsDir/Metrics.m > $ResultsDir/__Summary
python $LgEvalDir/src/sumDiff.py $ResultsDir/Diffs.diff html > $ResultsDir/ConfusionMatrix.html

echo "done."