Commit b5c53abf authored by E144069X

Removed the use of pims-based functions

parent 1c907195
......@@ -31,6 +31,33 @@ from random import Random
maxTime = 200
def countFrames(videoPaths):
    """Count the frames of each video, caching counts in a CSV file.

    Counting frames requires decoding the video's timestamp stream
    (torchvision.io.read_video_timestamps), which is slow, so results are
    cached in ../data/qual/frameNb.csv as "<videoName> <frameCount>" rows.
    On later calls only videos missing from the cache are decoded, and the
    cache file is updated with the new entries.

    Args:
        videoPaths (list of str): paths to the video files to count.

    Returns:
        tuple: (totalImgNb, imgNbDict) where totalImgNb is the summed frame
        count over all videos in videoPaths and imgNbDict maps each video's
        basename to its individual frame count.
    """
    cachePath = "../data/qual/frameNb.csv"
    totalImgNb = 0

    if not os.path.exists(cachePath):
        # No cache yet: decode every video and write the cache from scratch.
        frameNbArr = []
        for videoPath in videoPaths:
            nbImg = len(torchvision.io.read_video_timestamps(videoPath, pts_unit="sec")[0])
            vidName = os.path.basename(videoPath)
            frameNbArr.append((vidName, str(nbImg)))
        # reshape(-1,2) keeps the array 2-D even when videoPaths is empty
        # or contains a single video.
        frameNbArr = np.array(frameNbArr, dtype=str).reshape(-1, 2)
        np.savetxt(cachePath, frameNbArr, fmt="%s")
        totalImgNb = frameNbArr[:, 1].astype("int").sum() if len(frameNbArr) else 0
    else:
        # reshape(-1,2) guards against genfromtxt collapsing a single-row
        # CSV to a 1-D array, which would break the column indexing below.
        frameNbArr = np.genfromtxt(cachePath, dtype=str).reshape(-1, 2)
        for videoPath in videoPaths:
            vidName = os.path.basename(videoPath)
            if vidName in frameNbArr[:, 0]:
                # Cache hit: [0] extracts the scalar from the length-1 match.
                nbImg = int(frameNbArr[frameNbArr[:, 0] == vidName, 1][0])
            else:
                # Cache miss: decode the video and append the new row.
                nbImg = len(torchvision.io.read_video_timestamps(videoPath, pts_unit="sec")[0])
                frameNbArr = np.concatenate((frameNbArr, np.array([[vidName, str(nbImg)]])), axis=0)
            totalImgNb += nbImg
        # Persist any newly added entries.
        np.savetxt(cachePath, frameNbArr, fmt="%s")

    imgNbDict = {os.path.basename(videoPath): int(frameNbArr[frameNbArr[:, 0] == os.path.basename(videoPath), 1][0])
                 for videoPath in videoPaths}

    return totalImgNb, imgNbDict
class Sampler(torch.utils.data.sampler.Sampler):
""" The sampler for the SeqTrDataset dataset
"""
......@@ -96,9 +123,7 @@ class SeqTrDataset(torch.utils.data.Dataset):
self.origImgSize = origImgSize
if propStart != propEnd:
for videoPath in self.videoPaths:
nbImg = utils.getVideoFrameNb(videoPath)
self.nbImages += nbImg
self.nbImages = countFrames(self.videoPaths)
self.resizeImage = resizeImage
......@@ -130,7 +155,10 @@ class SeqTrDataset(torch.utils.data.Dataset):
vidName = os.path.basename(os.path.splitext(self.videoPaths[vidInd])[0])
frameNb = utils.getVideoFrameNb(self.videoPaths[vidInd])
if not self.videoPaths[vidInd] in self.timeStamps.keys():
self.timeStamps[self.videoPaths[vidInd]] = torchvision.io.read_video_timestamps(self.videoPaths[vidInd],pts_unit="sec")[0]
frameNb = len(self.timeStamps[self.videoPaths[vidInd]])
#Computes the label index of each frame
gt = getGT(vidName,self.dataset)
......@@ -149,9 +177,6 @@ class SeqTrDataset(torch.utils.data.Dataset):
video = torchvision.io.read_video(self.videoPaths[vidInd],pts_unit="sec")[0]
if not self.videoPaths[vidInd] in self.timeStamps.keys():
self.timeStamps[self.videoPaths[vidInd]] = torchvision.io.read_video_timestamps(self.videoPaths[vidInd],pts_unit="sec")[0]
return loadFrames_and_process(frameInds,gt,timeElapsed,vidName,self.videoPaths[vidInd],self.preproc,self.timeStamps[self.videoPaths[vidInd]])
class PreProcess():
......@@ -232,9 +257,8 @@ class TestLoader():
self.origImgSize = origImgSize
self.imgSize = imgSize
self.resizeImage = resizeImage
self.nbImages = 0
for videoPath in self.videoPaths:
self.nbImages += utils.getVideoFrameNb(videoPath)
self.nbImages = countFrames(self.videoPaths)
if self.resizeImage:
self.reSizeTorchFunc = torchvision.transforms.Compose([torchvision.transforms.ToPILImage(),torchvision.transforms.Resize(imgSize)])
......@@ -272,7 +296,10 @@ class TestLoader():
vidName = os.path.basename(os.path.splitext(videoPath)[0])
frameNb = utils.getVideoFrameNb(videoPath)
if not videoPath in self.timeStamps.keys():
self.timeStamps[videoPath] = torchvision.io.read_video_timestamps(videoPath,pts_unit="sec")[0]
frameNb = len(self.timeStamps[videoPath])
if self.currFrameInd is None:
#The video are not systematically annotated from the begining
......@@ -297,9 +324,6 @@ class TestLoader():
else:
self.currFrameInd += L
if not videoPath in self.timeStamps.keys():
self.timeStamps[videoPath] = torchvision.io.read_video_timestamps(videoPath,pts_unit="sec")[0]
return loadFrames_and_process(frameInds,gt,timeElapsed,vidName,videoPath,self.preproc,self.timeStamps[videoPath])
def loadFrames_and_process(frameInds,gt,timeElapsed,vidName,videoPath,preproc,timeStamps):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment