Nantes Université
Skip to content
GitLab
Explorer
Connexion
S'inscrire
Navigation principale
Rechercher ou aller à…
Projet
L
Lgeval
Gestion
Activité
Membres
Labels
Programmation
Tickets
Tableaux des tickets
Jalons
Wiki
Code
Requêtes de fusion
Dépôt
Branches
Validations
Étiquettes
Graphe du dépôt
Comparer les révisions
Extraits de code
Compilation
Pipelines
Jobs
Planifications de pipeline
Artéfacts
Déploiement
Releases
Registre de paquets
Registre de conteneur
Registre de modèles
Opération
Environnements
Modules Terraform
Surveillance
Incidents
Service d'assistance
Analyse
Données d'analyse des chaînes de valeur
Analyse des contributeurs
Données d'analyse CI/CD
Données d'analyse du dépôt
Expériences du modèle
Aide
Aide
Support
Documentation de GitLab
Comparer les forfaits GitLab
Forum de la communauté
Contribuer à GitLab
Donner votre avis
Raccourcis clavier
?
Extraits de code
Groupes
Projets
Afficher davantage de fils d'Ariane
CROHME
Lgeval
Validations
33cba8da
Valider
33cba8da
rédigé
il y a 3 ans
par
rlaz
Parcourir les fichiers
Options
Téléchargements
Correctifs
Plain Diff
Evaluate script improved
parent
d060e2b4
Aucune branche associée trouvée
Branches contenant la validation
Aucune étiquette associée trouvée
Aucune requête de fusion associée trouvée
Modifications
1
Masquer les modifications d'espaces
En ligne
Côte à côte
Affichage de
1 fichier modifié
bin/evaluate
+46
-29
46 ajouts, 29 suppressions
bin/evaluate
avec
46 ajouts
et
29 suppressions
bin/evaluate
+
46
−
29
Voir le fichier @
33cba8da
...
...
@@ -35,7 +35,6 @@ then
echo
" labelsGT.txt: list of node and edge labels in ground truth"
echo
" labelsOutput.txt: list of node and edge labels in output files"
echo
""
echo
" Metrics/: directory with .csv (metric) and .diff (difference) files"
echo
" graphErrors/: if dot output requested, visualizations for files with"
echo
-e
"
\t\t
errors are stored here (.dot and .pdf[default] or .png or both as specified)."
echo
""
...
...
@@ -53,6 +52,10 @@ FORMAT="pdf"
TARGETS
=
""
TARGET_COUNT
=
0
OUTPUTS
=
""
NL
=
$'
\n
'
OUTCOME_LIST
=
""
ResultsDir
=
Results_
$BNAME
################################################################
# Compile the list of output files and ground truth files.
...
...
@@ -61,6 +64,8 @@ OUTPUTS=""
# output files are ignored.
################################################################
echo
"[ LgEval evaluate ]"
# Case 1: Passed a list of file pairs
if
!
[
-d
$1
]
then
...
...
@@ -107,13 +112,13 @@ else
FORMAT
=
$4
fi
fi
echo
"* LgEval Results Directory:
$ResultsDir
"
TARGET_COUNT
=
$((
`
echo
$TARGETS
|
wc
-w
`
))
################################################################
# Create output directory structure, compile class labels
################################################################
ResultsDir
=
Results_
$BNAME
if
!
[
-d
$ResultsDir
]
then
mkdir
$ResultsDir
...
...
@@ -136,14 +141,13 @@ then
fi
fi
# Compile labels from ground truth. This is needed for confusion matrices to
# be properly defined, and for sanity checking results.
echo
"
$TARGETS
"
>
$ResultsDir
/t
fileTarge
t
python3
$LgEvalDir
/src/compileLabels.py
"
$ResultsDir
/t
fileTarge
t"
>
"
$ResultsDir
/labelsGT.txt"
echo
"
$OUTPUTS
"
>
$ResultsDir
/t
fileTarge
t
python3
$LgEvalDir
/src/compileLabels.py
"
$ResultsDir
/t
fileTarge
t"
>
"
$ResultsDir
/labelsOutput.txt"
rm
$ResultsDir
/t
fileTarge
t
echo
"
$TARGETS
"
>
$ResultsDir
/t
emp_file_lis
t
python3
$LgEvalDir
/src/compileLabels.py
"
$ResultsDir
/t
emp_file_lis
t"
>
"
$ResultsDir
/labelsGT.txt"
echo
"
$OUTPUTS
"
>
$ResultsDir
/t
emp_file_lis
t
python3
$LgEvalDir
/src/compileLabels.py
"
$ResultsDir
/t
emp_file_lis
t"
>
"
$ResultsDir
/labelsOutput.txt"
rm
$ResultsDir
/t
emp_file_lis
t
################################################################
...
...
@@ -174,23 +178,20 @@ do
# NOTE: the script convertCrohmeLg can be used to convert
# crohme .inkml files to .lg files.
CORRECT
=
"Correct"
#echo -ne " >> Comparing $FNAME.lg"
# RZ: Repairing to avoid running evaluation twice.
python3
$LgEvalDir
/src/evallg.py
$nextFile
$file
INTER
>
$ResultsDir
/Metrics/
$FNAME
.csv
METRICS
=
`
grep
-v
"
\*
"
$ResultsDir
/Metrics/
$FNAME
.csv
`
echo
$METRICS
>
$ResultsDir
/Metrics/
$FNAME
.csv
DIFF
=
`
grep
"
\*
"
$ResultsDir
/Metrics/
$FNAME
.csv
`
#echo "$METRICS"
#read V
#echo "$DIFF"
#read V
# If differences reported, record them
if
[
-n
"
$DIFF
"
]
# RZ: Run evaluation once vs. twice
OUT
=
`
python3
$LgEvalDir
/src/evallg.py
$nextFile
$file
INTER
`
# Match asterisk at beginning of line to select differences/errors
# WARNING: Double quotes are important to preserve newlines!
DIFF
=
`
echo
"
$OUT
"
|
grep
"
\*
"
`
echo
"
$DIFF
"
>
$ResultsDir
/Metrics/
$FNAME
.diff
echo
"
$OUT
"
|
grep
-v
"
\*
"
>
$ResultsDir
/Metrics/
$FNAME
.csv
# If differences reported, record files with errors, generate visualizations
if
[
"
$DIFF
"
!=
""
]
then
CORRECT
=
"Incorrect"
echo
"
$DIFF
"
>
$ResultsDir
/Metrics/
$FNAME
.diff
# If a third argument is provided, generate a .pdf file to visualize
# differences between graphs.
...
...
@@ -202,6 +203,7 @@ do
else
lg2dot
$nextFile
$file
--graph_type
"
$DOTARG
"
--format
$FORMAT
fi
mv
$FNAME
.dot
$ResultsDir
/errorGraphs/dot
if
[
"
$FORMAT
"
==
"pdf"
]
;
then
mv
$FNAME
.pdf
$ResultsDir
/errorGraphs/pdf
...
...
@@ -212,14 +214,12 @@ do
mv
$FNAME
.png
$ResultsDir
/errorGraphs/png
fi
fi
else
rm
-f
$ResultsDir
/Metrics/
$FNAME
.diff
fi
# Add record of evaluating the file.
echo
"
$nextFile
,
$CORRECT
"
>>
$ResultsDir
/FileResults.csv
OUTCOME_LIST
=
`
printf
"%s
\n
%s"
"
$OUTCOME_LIST
"
"
$nextFile
,
$CORRECT
"
`
else
echo
"
Already processed:
$file
"
echo
"
*
Already processed:
$file
"
fi
INDEX
=
$((
INDEX+1
))
...
...
@@ -231,9 +231,12 @@ done
################################################################
# Compile metrics
# Including summaries and confusion matrices
#
# Stored as individual files to prevent re-computation for user
################################################################
echo
"
$OUTCOME_LIST
"
>>
$ResultsDir
/FileResults.csv
cat
$ResultsDir
/Metrics/
*
.csv
>
$ResultsDir
/
$BNAME
.csv
ALLDIFFS
=
`
ls
$ResultsDir
/Metrics |
grep
.diff
`
if
[
-n
"
$ALLDIFFS
"
]
then
...
...
@@ -243,6 +246,8 @@ else
touch
$ResultsDir
/
$BNAME
.diff
# empty - no errors.
fi
# Compute summaries
python3
$LgEvalDir
/src/sumMetric.py
"
$LABEL_STRING
"
$ResultsDir
/
$BNAME
.csv
>
$ResultsDir
/Summary.txt
python3
$LgEvalDir
/src/sumDiff.py
$ResultsDir
/
$BNAME
.diff
$ResultsDir
/labelsGT.txt html
>
$ResultsDir
/ConfusionMatrices.html
python3
$LgEvalDir
/src/sumDiff.py
$ResultsDir
/
$BNAME
.diff
$ResultsDir
/labelsGT.txt
>
$ResultsDir
/ConfusionMatrices.csv
...
...
@@ -267,11 +272,23 @@ awk -F',' '{ for (i=2;i<=NF;i+=2) printf ("%s%c", $i, i + 2 <= NF ? "," : "\n")}
paste
-d
,
$ResultsDir
/FileResults.csv
$ResultsDir
/Data.csv
>
$ResultsDir
/DataNew.csv
cat
$ResultsDir
/HeaderRow.csv
$ResultsDir
/DataNew.csv
>
$ResultsDir
/FileMetrics.csv
# Clean up
##################################
# Clean up
##################################
rm
-f
$ResultsDir
/Headers.csv
$ResultsDir
/HeaderRow.csv
$ResultsDir
/Data.csv
rm
-f
$ResultsDir
/DataNew.csv
$ResultsDir
/FileResults.csv
rm
-f
$ResultsDir
/
$BNAME
.csv
$ResultsDir
/
$BNAME
.diff
echo
""
echo
"done."
echo
""
echo
"
$ResultsDir
/ contents:"
echo
" Summary.txt -- Evaluation metrics summary"
echo
" FileMetrics.csv -- Raw metrics file"
echo
" labelsOut.txt -- Node & edge labels in output files"
echo
" labelsGT.txt -- Node & edge labels in ground truth files"
echo
" ConfusionMatrices.html -- Readable web page with confusion matrices (HTML)"
echo
" ConfusionMatrices.csv -- Confusion matrix (CSV format)"
echo
" Metrics/ -- Individual file metrics (.csv) & differences (.diff)"
echo
""
This diff is collapsed.
Cliquez pour l'agrandir.
Aperçu
0%
Chargement en cours
Veuillez réessayer
ou
joindre un nouveau fichier
.
Annuler
Vous êtes sur le point d'ajouter
0
personne(s)
à la discussion. Procédez avec prudence.
Terminez d'abord l'édition de ce message.
Enregistrer le commentaire
Annuler
Veuillez vous
inscrire
ou vous
connecter
pour commenter