Commit 1fa7fd99 authored by E144069X's avatar E144069X

Use human intra operator standard deviations for temporal accuracy

parent 46d2efd8
......@@ -9,6 +9,9 @@ import math
import load_data
phasesSTDs = {"tPNa": 1.13,"tPNf":0.50,"t2":0.91,"t3":1.81,"t4":1.34,"t5":1.49,"t6":1.61,"t7":2.93,"t8":5.36,"t9+":4.42,"tM": 5.46,"tSB":3.78,"tB":3.29,"tEB":4.85,"tHB":15}
labelDict = {"tPB2":0,"tPNa":1,"tPNf":2,"t2":3,"t3":4,"t4":5,"t5":6,"t6":7,"t7":8,"t8":9,"t9+":10,"tM":11,"tSB":12,"tB":13,"tEB":14,"tHB":15}
revLabDict = {labelDict[key]:key for key in labelDict.keys()}
# Code taken from https://gist.github.com/PetrochukM/afaa3613a99a8e7213d2efdd02ae4762#file-top_k_viterbi-py-L5
# Credits to AllenNLP for the base implementation and base tests:
# https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py#L174
......@@ -155,22 +158,22 @@ def correlation(predBatch,target,videoNames,paramDict,onlyPairs=True):
timeElapsedTensor = np.genfromtxt("../data/{}/annotations/{}_timeElapsed.csv".format(dataset,videoNames[i]),delimiter=",")[1:,1]
phasesPredDict = phaseToTime(pred,timeElapsedTensor)
phasesTargDict = phaseToTime(target[0],timeElapsedTensor)
phasesTargDict = phaseToTime(target[i],timeElapsedTensor)
commonPhases = list(set(list(phasesPredDict.keys())).intersection(set(list(phasesTargDict.keys()))))
tempDic = paramDict["Temp Accuracy"]
step = (tempDic["maxThres"]-tempDic["minThres"])/tempDic["thresNb"]
thresList = np.arange(tempDic["minThres"],tempDic["maxThres"],step)
thresList = np.arange(tempDic["minThres"],tempDic["maxThres"],tempDic["step"])
accList = []
for i in range(len(thresList)):
timePairs = []
accuracy = 0
for phase in commonPhases:
timePairs.append((phasesPredDict[phase],phasesTargDict[phase]))
if np.abs(phasesPredDict[phase]-phasesTargDict[phase]) <= thresList[i]:
accuracy +=1
accuracy /= len(phasesTargDict.keys())
if phase != 0:
timePairs.append((phasesPredDict[phase],phasesTargDict[phase]))
if np.abs(phasesPredDict[phase]-phasesTargDict[phase]) <= thresList[i]*phasesSTDs[revLabDict[phase]]:
accuracy +=1
accuracy /= len(phasesTargDict.keys()) -1
accList.append(accuracy)
if onlyPairs:
......
......@@ -45,7 +45,7 @@ from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
def evalModel(dataset,partBeg,partEnd,propSetIntFormat,exp_id,model_id,epoch,nbClass,tempAccMinThres,tempAccMaxThres,tempAccThresNb):
def evalModel(dataset,partBeg,partEnd,propSetIntFormat,exp_id,model_id,epoch,nbClass,tempAccMinThres,tempAccMaxThres,tempAccThresStep,tempAccThresToPrint):
'''
Evaluate a model. It requires the scores for each video to have been computed already with the trainVal.py script. Check readme to
see how to compute the scores for each video.
......@@ -83,23 +83,22 @@ def evalModel(dataset,partBeg,partEnd,propSetIntFormat,exp_id,model_id,epoch,nbC
resFilePaths = np.array(list(filter(lambda x:x in videoNameDict.keys(),resFilePaths)))
metricParamDict = {"Temp Accuracy":{"minThres":tempAccMinThres,"maxThres":tempAccMaxThres,"step":tempAccThresStep,"thresToPrint":tempAccThresToPrint}}
thresList = getThresList(metricParamDict)
#Store the value of the f-score of for video and for each threshold
metTun = {}
metricNameList = metrics.emptyMetrDict().keys()
metEval={}
for metricName in metricNameList:
if metricName.find("Accuracy") != -1:
if metricName.find("Temp") != -1:
metEval[metricName] = np.zeros((len(resFilePaths),tempAccThresNb))
metEval[metricName] = np.zeros((len(resFilePaths),len(thresList)))
else:
metEval[metricName] = np.zeros(len(resFilePaths))
if metricName == "Correlation":
metEval[metricName] = []
metricParamDict = {"Temp Accuracy":{"minThres":tempAccMinThres,"maxThres":tempAccMaxThres,"thresNb":tempAccThresNb}}
transMat,priors = torch.zeros((nbClass,nbClass)).float(),torch.zeros((nbClass)).float()
transMat,_ = trainVal.computeTransMat(dataset,transMat,priors,partBeg,partEnd,propSetIntFormat)
......@@ -129,28 +128,35 @@ def evalModel(dataset,partBeg,partEnd,propSetIntFormat,exp_id,model_id,epoch,nbC
metEval["Correlation"] = np.array(metEval["Correlation"])
metEval["Correlation"] = np.corrcoef(metEval["Correlation"][:,0],metEval["Correlation"][:,1])[0,1]
metEval["Temp Accuracy"],meanAccPerThres = agregateTempAcc(metEval["Temp Accuracy"],metricParamDict["Temp Accuracy"])
saveTempAccPerThres(exp_id,model_id,meanAccPerThres)
#Writing the latex table
printHeader = not os.path.exists("../results/{}/metrics.csv".format(exp_id))
with open("../results/{}/metrics.csv".format(exp_id),"a") as text_file:
if printHeader:
print("Model,Accuracy,Accuracy (Viterbi),Correlation,Temp Accuracy",file=text_file)
#print("Model,Accuracy,Accuracy (Viterbi),Correlation,Temp Accuracy",file=text_file)
print("Model,",end="",file=text_file)
print(",".join([key for key in metEval.keys() if key.find("Temp Accuracy") == -1])+",",end="",file=text_file)
print("Temp Accuracy "+str(tempAccThresToPrint),file=text_file)
print(model_id+","+str(metEval["Accuracy"].sum()/totalFrameNb)+","+str(metEval["Accuracy (Viterbi)"].sum()/totalFrameNb)+","\
+str(metEval["Correlation"])+","+str(metEval["Temp Accuracy"].mean()),file=text_file)
+str(metEval["Correlation"])+",",end="",file=text_file)
print(metEval["Temp Accuracy"][np.where(thresList == tempAccThresToPrint)][0],file=text_file)
def agregateTempAcc(acc,accParamDict):
    """Aggregate a (videos x thresholds) temporal-accuracy matrix.

    Args:
        acc: 2D array where rows are videos and columns are thresholds.
        accParamDict: threshold parameter dict (currently unused here).
    Returns:
        A pair (mean accuracy per video, mean accuracy per threshold).
    """
    accPerThreshold = acc.mean(axis=0)
    accPerVideo = acc.mean(axis=1)
    return accPerVideo, accPerThreshold
def saveTempAccPerThres(exp_id,model_id,meanAccPerThres):
def getThresList(paramDict):
    """Build the list of thresholds used for temporal accuracy.

    Args:
        paramDict: dict holding a "Temp Accuracy" entry with keys
            "minThres", "maxThres" and "step" (thresholds in hours,
            as described by the CLI arguments that feed this dict).
    Returns:
        np.ndarray of thresholds from minThres (inclusive) up to
        maxThres (exclusive), spaced by step.
    """
    tempDic = paramDict["Temp Accuracy"]
    # np.arange excludes the upper bound, matching the previous
    # min/max/count-based computation this replaced.
    return np.arange(tempDic["minThres"], tempDic["maxThres"], tempDic["step"])
def saveTempAccPerThres(exp_id,model_id,meanAccPerThres):
    # Persist the mean temporal accuracy obtained at each threshold
    # (one value per line) into the experiment's results folder.
    # NOTE(review): path is relative to the working directory and assumes
    # ../results/<exp_id>/ already exists -- confirm against callers.
    np.savetxt("../results/{}/{}_tempAcc.csv".format(exp_id,model_id),meanAccPerThres)
def computeMetrics(path,dataset,videoName,resFilePaths,videoNameDict,metTun,transMat,metricParamDict):
......@@ -370,7 +376,7 @@ def plotData(nbClass,dataset):
plt.tight_layout()
plt.savefig("../vis/prior_{}.png".format(dataset))
def agregatePerfs(exp_id,paramAgr,keysRef,namesRef,tempAccMinThres,tempAccMaxThres,tempAccThresNb):
def agregatePerfs(exp_id,paramAgr,keysRef,namesRef,tempAccMinThres,tempAccMaxThres,tempAccThresStep):
csv = np.genfromtxt("../results/{}/metrics.csv".format(exp_id),delimiter=",",dtype="str")
......@@ -388,7 +394,7 @@ def agregatePerfs(exp_id,paramAgr,keysRef,namesRef,tempAccMinThres,tempAccMaxThr
else:
groupedLines[key] = [line]
plotTempAcc(groupedLines.keys(),tempAccMinThres,tempAccMaxThres,tempAccThresNb,exp_id,keyToNameDict,groupedLines)
plotTempAcc(groupedLines.keys(),tempAccMinThres,tempAccMaxThres,tempAccThresStep,exp_id,keyToNameDict,groupedLines)
csvStr = "Model&"+"&".join(metricNames)+"\\\\ \n \hline \n"
mean = np.zeros((len(groupedLines.keys()),csv.shape[1]-1))
......@@ -422,10 +428,9 @@ def agregatePerfs(exp_id,paramAgr,keysRef,namesRef,tempAccMinThres,tempAccMaxThr
#Computing the t-test
ttest_matrix(groupedLines,orderedKeys,exp_id,metricNames,keyToNameDict)
def plotTempAcc(keys,minThres,maxThres,thresNb,exp_id,keyToNameDict,groupedLines):
def plotTempAcc(keys,minThres,maxThres,thresStep,exp_id,keyToNameDict,groupedLines):
step = (maxThres-minThres)/thresNb
thresList = np.arange(minThres,maxThres,step)
thresList = np.arange(minThres,maxThres,thresStep)
plt.figure()
......@@ -571,7 +576,6 @@ def plotAttentionMaps(dataset,exp_id,model_id,plotFeatMaps):
while i < frameNb:
frame = video[i]
frame = frame[frame.shape[0]-frame.shape[1]:,:]
nearestLowerDiv = frame.shape[0]//16
......@@ -904,7 +908,8 @@ def main(argv=None):
argreader.parser.add_argument('--temp_acc_min_thres',type=float,metavar="MIN",help='The minimum threshold to use for temporal accuracy (In hours)')
argreader.parser.add_argument('--temp_acc_max_thres',type=float,metavar="MAX",help='The maximum threshold to use for temporal accuracy (In hours)')
argreader.parser.add_argument('--temp_acc_thres_nb',type=int,metavar="INT",help='The number of threshold to use for temporal accuracy.')
argreader.parser.add_argument('--temp_acc_thres_step',type=float,metavar="FLOAT",help='The step between thresholds')
argreader.parser.add_argument('--temp_acc_threstoprint',type=float,metavar="FLOAT",help='The threshold to print in the tables')
######################## Database plot #################################
......@@ -962,7 +967,6 @@ def main(argv=None):
args.test_part_beg,args.test_part_end,args.names)
if args.eval_model:
'''
if os.path.exists("../results/{}/metrics.csv".format(args.exp_id)):
os.remove("../results/{}/metrics.csv".format(args.exp_id))
......@@ -974,10 +978,11 @@ def main(argv=None):
evalModel(conf["dataset_test"],float(conf["test_part_beg"]),float(conf["test_part_end"]),str2bool(conf["prop_set_int_fmt"]),args.exp_id,model_id,epoch=args.epochs_to_process[i],\
nbClass=int(conf["class_nb"]),tempAccMinThres=args.temp_acc_min_thres,tempAccMaxThres=args.temp_acc_max_thres,tempAccThresNb=args.temp_acc_thres_nb)
'''
nbClass=int(conf["class_nb"]),tempAccMinThres=args.temp_acc_min_thres,tempAccMaxThres=args.temp_acc_max_thres,tempAccThresStep=args.temp_acc_thres_step,\
tempAccThresToPrint=args.temp_acc_threstoprint)
if len(args.param_agr) > 0:
agregatePerfs(args.exp_id,args.param_agr,args.keys,args.names,args.temp_acc_min_thres,args.temp_acc_max_thres,args.temp_acc_thres_nb)
agregatePerfs(args.exp_id,args.param_agr,args.keys,args.names,args.temp_acc_min_thres,args.temp_acc_max_thres,args.temp_acc_thres_step)
if not args.plot_data is None:
plotData(args.plot_data,args.dataset_test)
if args.plot_attention_maps:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment