Commit 9f51d684 authored by Gihan Jayatilaka

inception model added

parent 514955f7
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
DEBUG=True
PC=False
GPU=1
NO_MAX_FRAMES=20000
TARGET_COST=0.01
# In[ ]:
import tensorflow as tf
tf.set_random_seed(625742)  # TF 1.x API; fix the graph-level random seed
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split  # TF 1.x contrib shim, used here in place of sklearn's train_test_split
# import keras.backend as K
import time
import numpy as np
import cv2
import sys
import os
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
# In[ ]:
def makeY(fileNameCsv,noFrames,skipFrames):
    # Load per-frame labels from the CSV and keep rows skipFrames..noFrames
    df=pd.read_csv(fileNameCsv, sep=',',header=None)
    df=np.array(df).astype(np.float32)
    return df[skipFrames:noFrames,:]
# In[ ]:
def makeX(fileNameVideo,noFrames,skipFrames):
    # Read noFrames frames from the video into a float32 tensor of model-input size
    cap = cv2.VideoCapture(fileNameVideo)
    if cap.isOpened() == False:
        print("Error opening video stream or file")
    ret,frame=cap.read()
    X=np.zeros((noFrames,FRAME_HEIGHT,FRAME_WIDTH,COLOR_CHANNELS),dtype=np.float32)
    for f in range(noFrames):
        if not ret:
            break  # video ended early; remaining rows stay zero
        # Assumption: resize each frame to FRAME_WIDTH x FRAME_HEIGHT (the original
        # assigned the raw frame, which only works if the video is already that size)
        X[f,:,:,:]=cv2.resize(frame,(FRAME_WIDTH,FRAME_HEIGHT))
        ret,frame=cap.read()
    return X[skipFrames:,:,:,:]
# In[ ]:
FRAME_HEIGHT=224
FRAME_WIDTH=224
COLOR_CHANNELS=3
CELLS_PER_FRAME=9
INPUT_DIM=(FRAME_HEIGHT,FRAME_WIDTH)
OUTPUT_DIM=CELLS_PER_FRAME
EPOCHS=200
BATCH_SIZE=64
CUDA1=0
CUDA2=1
if GPU==0:
    os.environ["CUDA_VISIBLE_DEVICES"]="-1"
elif GPU==1:
    os.environ["CUDA_VISIBLE_DEVICES"]="{}".format(CUDA2)
elif GPU==2:
    os.environ["CUDA_VISIBLE_DEVICES"]="{},{}".format(CUDA1,CUDA2)
sess = tf.Session()
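# In[ ]:
# A minimal sketch (not part of the committed code), assuming the TF 1.x / Keras 2 APIs
# used elsewhere in this script: configure the session for incremental GPU memory
# allocation and register it with the Keras backend so Keras reuses it.
import keras.backend as K
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory as needed rather than all at once
sess = tf.Session(config=config)
K.set_session(sess)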
# In[23]:
def trainAndTestForVideo(model,fileName,noFrames,framesToSkip=0,videoFileFormat='.avi',testSplit=0.1):
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Starting new video file\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>{}".format(fileName))
    FILE_NAME=fileName
    FILE_NAME_VIDEO=FILE_NAME+videoFileFormat
    FILE_NAME_CSV=FILE_NAME+'.csv'
    NO_FRAMES=noFrames
    if PC: NO_FRAMES=100
    FRAMES_TO_SKIP=framesToSkip
    dataX=makeX(FILE_NAME_VIDEO,NO_FRAMES,FRAMES_TO_SKIP)
    dataY=makeY(FILE_NAME_CSV,NO_FRAMES,FRAMES_TO_SKIP)
    dataX=(dataX-127.5)/128.0  # scale pixels to roughly [-1, 1]
    if DEBUG: print("X type: {}, Y type: {}.".format(dataX.dtype,dataY.dtype))
    xTrain, xTest, yTrain, yTest= train_test_split(dataX, dataY, test_size=testSplit)
    print("SIZES: xTrain {}, yTrain {}, xTest {}, yTest {}".format(xTrain.shape,yTrain.shape,xTest.shape,yTest.shape))
    model.fit(xTrain,yTrain,epochs=20, verbose=1)
    print(model.evaluate(xTest,yTest))  # print test loss/accuracy instead of discarding them
# In[21]:
def alexNet():
    model = Sequential()
    # 1st Convolutional Layer
    model.add(Conv2D(filters=96, input_shape=(224,224,3), kernel_size=(11,11), strides=(4,4), padding='valid'))
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
    # 2nd Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
    # 3rd Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
    model.add(Activation('relu'))
    # 4th Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
    model.add(Activation('relu'))
    # 5th Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
    # Pass the feature maps to the fully connected layers
    model.add(Flatten())
    # 1st Fully Connected Layer
    model.add(Dense(4096))
    model.add(Activation('relu'))
    # Add Dropout to prevent overfitting
    model.add(Dropout(0.4))
    # 2nd Fully Connected Layer
    model.add(Dense(4096))
    model.add(Activation('relu'))
    # Add Dropout
    model.add(Dropout(0.4))
    # 3rd Fully Connected Layer
    model.add(Dense(1000))
    model.add(Activation('relu'))
    # Add Dropout
    model.add(Dropout(0.4))
    # Output Layer: one sigmoid unit per grid cell
    model.add(Dense(9))
    model.add(Activation('sigmoid'))
    model.summary()
    # Compile the model
    model.compile(loss=keras.losses.mean_squared_error, optimizer='adam', metrics=['accuracy'])
    return model
# In[22]:
trainAndTestForVideo(alexNet(),'./video/real00',10000,videoFileFormat='.avi',testSplit=0.2)
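# In[ ]:
# The commit message says "inception model added", but no inception builder appears in
# these listings. The following is a hedged sketch (not the committed code) of how an
# inception-style builder could plug into trainAndTestForVideo(), assuming Keras'
# bundled InceptionV3 backbone and the same 9-cell sigmoid head used by alexNet().
from keras.applications.inception_v3 import InceptionV3
from keras.layers import GlobalAveragePooling2D
from keras.models import Model

def inceptionNet():
    # Backbone without the ImageNet classifier head; weights=None trains from scratch
    base = InceptionV3(include_top=False, weights=None,
                       input_shape=(FRAME_HEIGHT, FRAME_WIDTH, COLOR_CHANNELS))
    x = GlobalAveragePooling2D()(base.output)
    out = Dense(CELLS_PER_FRAME, activation='sigmoid')(x)  # one output per grid cell
    model = Model(inputs=base.input, outputs=out)
    model.summary()
    model.compile(loss=keras.losses.mean_squared_error, optimizer='adam', metrics=['accuracy'])
    return model

# Example call, mirroring the alexNet() run above:
# trainAndTestForVideo(inceptionNet(), './video/real00', 10000, videoFileFormat='.avi', testSplit=0.2)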
# In[ ]:
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DEBUG=True\n",
"PC=False\n",
"GPU=1\n",
"NO_MAX_FRAMES=20000\n",
"\n",
"\n",
"TARGET_COST=0.01"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"tf.set_random_seed(625742)\n",
"from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split\n",
"# import keras.backend as K\n",
"import time\n",
"\n",
"import numpy as np\n",
"import cv2\n",
"import sys\n",
"import os\n",
"import pandas as pd\n",
"import keras\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D\n",
"from keras.layers.normalization import BatchNormalization\n",
"import numpy as np\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def makeY(fileNameCsv,noFrames,skipFrames):\n",
" df=pd.read_csv(fileNameCsv, sep=',',header=None)\n",
" df=np.array(df).astype(np.float32)\n",
" \n",
" return df[skipFrames:noFrames,:]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"def makeX(fileNameVideo,noFrames,skipFrames):\n",
" cap = cv2.VideoCapture(fileNameVideo)\n",
" if (cap.isOpened()== False): \n",
" print(\"Error opening video stream or file\")\n",
" ret,frame=cap.read()\n",
"\n",
"\n",
" X=np.zeros((noFrames,FRAME_HEIGHT,FRAME_WIDTH,COLOR_CHANNELS),dtype=np.float32)\n",
" \n",
" for f in range(noFrames):\n",
" X[f,:,:,:]=frame\n",
" ret,frame=cap.read()\n",
" return X[skipFrames:,:,:,:]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"FRAME_HEIGHT=224\n",
"FRAME_WIDTH=224\n",
"COLOR_CHANNELS=3\n",
"CELLS_PER_FRAME=9\n",
"\n",
"INPUT_DIM=(FRAME_HEIGHT,FRAME_WIDTH)\n",
"OUTPUT_DIM=CELLS_PER_FRAME\n",
"\n",
"\n",
"EPOCHS=200\n",
"BATCH_SIZE=64\n",
"CUDA1=4\n",
"CUDA2=7\n",
"\n",
"\n",
"if GPU==0:\n",
" os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\" \n",
"elif GPU==1:\n",
" os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"{}\".format(CUDA1)\n",
"elif GPU==2:\n",
" os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"{},{}\".format(CUDA1,CUDA2)\n",
"\n",
"sess = tf.Session()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def trainAndTestForVideo(model,fileName,noFrames,framesToSkip=0,videoFileFormat='.avi',testSplit=0.1):\n",
" print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Starting new video file\\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>{}\".format(fileName))\n",
" \n",
" FILE_NAME=fileName\n",
" FILE_NAME_VIDEO=FILE_NAME+videoFileFormat\n",
" FILE_NAME_CSV=FILE_NAME+'.csv'\n",
"\n",
" NO_FRAMES=noFrames\n",
" if PC: NO_FRAMES=100\n",
" FRAMES_TO_SKIP=framesToSkip\n",
"\n",
" dataX=makeX(FILE_NAME_VIDEO,NO_FRAMES,FRAMES_TO_SKIP)\n",
" dataY=makeY(FILE_NAME_CSV,NO_FRAMES,FRAMES_TO_SKIP)\n",
" \n",
" dataX=(dataX-127.5)/128.0\n",
"\n",
" \n",
" if DEBUG: print(\"X type: {}, Y type: {}.\".format(dataX.dtype,dataY.dtype))\n",
" xTrain, xTest, yTrain, yTest= train_test_split(dataX, dataY, test_size=testSplit)\n",
" print(\"SIZES: xTrain {}, yTrain {}, xTest {}, yTest {}\".format(xTrain.shape,yTrain.shape,xTest.shape,yTest.shape))\n",
" \n",
" iters=int(input(\"How many iters more? : \"))\n",
" while iters>0:\n",
" model.fit(xTrain,yTrain,epochs=1, verbose=1)#,batch_size=BATCH_SIZE)\n",
" iters-=1\n",
" if iters==0:\n",
" iters=int(input(\"How many iters more? : \"))\n",
" \n",
" print(model.evaluate(xTrain,yTrain))\n",
" print(model.evaluate(xTest,yTest))\n",
" \n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"def vggNet():\n",
"\n",
" #Instantiate an empty model\n",
" model = Sequential([\n",
" \n",
" Conv2D(3, (1, 1), input_shape=(FRAME_HEIGHT,FRAME_WIDTH,COLOR_CHANNELS), padding='same', activation='relu'),\n",
"# Conv2D(64, (3, 3), activation='relu', padding='same'),\n",
"# MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),\n",
"# Conv2D(128, (3, 3), activation='relu', padding='same'),\n",
"# Conv2D(128, (3, 3), activation='relu', padding='same',),\n",
"# MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),\n",
"# Conv2D(256, (3, 3), activation='relu', padding='same',),\n",
"# Conv2D(256, (3, 3), activation='relu', padding='same',),\n",
"# Conv2D(256, (3, 3), activation='relu', padding='same',),\n",
"# MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),\n",
"# Conv2D(512, (3, 3), activation='relu', padding='same',),\n",
"# Conv2D(512, (3, 3), activation='relu', padding='same',),\n",
"# Conv2D(512, (3, 3), activation='relu', padding='same',),\n",
"# MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),\n",
"# Conv2D(512, (3, 3), activation='relu', padding='same',),\n",
"# Conv2D(512, (3, 3), activation='relu', padding='same',),\n",
"# Conv2D(512, (3, 3), activation='relu', padding='same',),\n",
"# MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),\n",
" Flatten(),\n",
"# Dense(256, activation='relu'),\n",
" Dense(CELLS_PER_FRAME, activation='sigmoid')\n",
" ])\n",
"\n",
" '''Dense(4096, activation='relu'),\n",
" Dense(4096, activation='relu'),'''\n",
" \n",
" model.summary()\n",
"\n",
" model.compile(loss=keras.losses.mean_squared_error, optimizer='adam', metrics=['accuracy'])\n",
" \n",
" return model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trainAndTestForVideo(vggNet(),'./video/real00',10000,videoFileFormat='.avi',testSplit=0.2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
DEBUG=True
PC=False
GPU=1
NO_MAX_FRAMES=20000
TARGET_COST=0.01
# In[ ]:
import tensorflow as tf
tf.set_random_seed(625742)
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
# import keras.backend as K
import time
import numpy as np
import cv2
import sys
import os
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
# In[ ]:
def makeY(fileNameCsv,noFrames,skipFrames):
    # Load per-frame labels from the CSV and keep rows skipFrames..noFrames
    df=pd.read_csv(fileNameCsv, sep=',',header=None)
    df=np.array(df).astype(np.float32)
    return df[skipFrames:noFrames,:]
# In[ ]:
def makeX(fileNameVideo,noFrames,skipFrames):
    # Read noFrames frames from the video into a float32 tensor of model-input size
    cap = cv2.VideoCapture(fileNameVideo)
    if cap.isOpened() == False:
        print("Error opening video stream or file")
    ret,frame=cap.read()
    X=np.zeros((noFrames,FRAME_HEIGHT,FRAME_WIDTH,COLOR_CHANNELS),dtype=np.float32)
    for f in range(noFrames):
        if not ret:
            break  # video ended early; remaining rows stay zero
        # Assumption: resize each frame to FRAME_WIDTH x FRAME_HEIGHT (the original
        # assigned the raw frame, which only works if the video is already that size)
        X[f,:,:,:]=cv2.resize(frame,(FRAME_WIDTH,FRAME_HEIGHT))
        ret,frame=cap.read()
    return X[skipFrames:,:,:,:]
# In[ ]:
FRAME_HEIGHT=224
FRAME_WIDTH=224
COLOR_CHANNELS=3
CELLS_PER_FRAME=9
INPUT_DIM=(FRAME_HEIGHT,FRAME_WIDTH)
OUTPUT_DIM=CELLS_PER_FRAME
EPOCHS=200
BATCH_SIZE=64
CUDA1=4
CUDA2=7
if GPU==0:
    os.environ["CUDA_VISIBLE_DEVICES"]="-1"
elif GPU==1:
    os.environ["CUDA_VISIBLE_DEVICES"]="{}".format(CUDA1)
elif GPU==2:
    os.environ["CUDA_VISIBLE_DEVICES"]="{},{}".format(CUDA1,CUDA2)
sess = tf.Session()
# In[ ]:
def trainAndTestForVideo(model,fileName,noFrames,framesToSkip=0,videoFileFormat='.avi',testSplit=0.1):
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Starting new video file\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>{}".format(fileName))
    FILE_NAME=fileName
    FILE_NAME_VIDEO=FILE_NAME+videoFileFormat
    FILE_NAME_CSV=FILE_NAME+'.csv'
    NO_FRAMES=noFrames
    if PC: NO_FRAMES=100
    FRAMES_TO_SKIP=framesToSkip
    dataX=makeX(FILE_NAME_VIDEO,NO_FRAMES,FRAMES_TO_SKIP)
    dataY=makeY(FILE_NAME_CSV,NO_FRAMES,FRAMES_TO_SKIP)
    dataX=(dataX-127.5)/128.0  # scale pixels to roughly [-1, 1]
    if DEBUG: print("X type: {}, Y type: {}.".format(dataX.dtype,dataY.dtype))
    xTrain, xTest, yTrain, yTest= train_test_split(dataX, dataY, test_size=testSplit)
    print("SIZES: xTrain {}, yTrain {}, xTest {}, yTest {}".format(xTrain.shape,yTrain.shape,xTest.shape,yTest.shape))
    # Train interactively: keep asking for more epochs until the user answers 0
    iters=int(input("How many iters more? : "))
    while iters>0:
        model.fit(xTrain,yTrain,epochs=1, verbose=1)#,batch_size=BATCH_SIZE)
        iters-=1
        if iters==0:
            iters=int(input("How many iters more? : "))
    print(model.evaluate(xTrain,yTrain))
    print(model.evaluate(xTest,yTest))
# In[ ]:
def vggNet():
    # Instantiate an empty model
    model = Sequential([
        Conv2D(3, (1, 1), input_shape=(FRAME_HEIGHT,FRAME_WIDTH,COLOR_CHANNELS), padding='same', activation='relu'),
#         Conv2D(64, (3, 3), activation='relu', padding='same'),
#         MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
#         Conv2D(128, (3, 3), activation='relu', padding='same'),
#         Conv2D(128, (3, 3), activation='relu', padding='same',),
#         MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
#         Conv2D(256, (3, 3), activation='relu', padding='same',),
#         Conv2D(256, (3, 3), activation='relu', padding='same',),
#         Conv2D(256, (3, 3), activation='relu', padding='same',),
#         MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
#         Conv2D(512, (3, 3), activation='relu', padding='same',),
#         Conv2D(512, (3, 3), activation='relu', padding='same',),
#         Conv2D(512, (3, 3), activation='relu', padding='same',),
#         MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
#         Conv2D(512, (3, 3), activation='relu', padding='same',),
#         Conv2D(512, (3, 3), activation='relu', padding='same',),
#         Conv2D(512, (3, 3), activation='relu', padding='same',),
#         MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        Flatten(),
#         Dense(256, activation='relu'),
        Dense(CELLS_PER_FRAME, activation='sigmoid')
    ])
    '''Dense(4096, activation='relu'),
    Dense(4096, activation='relu'),'''
    model.summary()
    model.compile(loss=keras.losses.mean_squared_error, optimizer='adam', metrics=['accuracy'])
    return model
# In[ ]:
trainAndTestForVideo(vggNet(),'./video/real00',10000,videoFileFormat='.avi',testSplit=0.2)
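# In[ ]:
# TARGET_COST, NO_MAX_FRAMES and EPOCHS are defined at the top of this script but never
# used. A minimal sketch (illustrative only, not the committed training code) of a
# non-interactive alternative to the "How many iters more?" loop: train one epoch at a
# time until the training loss drops below TARGET_COST or EPOCHS epochs have run.
def fitUntilTargetCost(model, xTrain, yTrain, targetCost=TARGET_COST, maxEpochs=EPOCHS):
    for epoch in range(maxEpochs):
        history = model.fit(xTrain, yTrain, epochs=1, verbose=1)
        loss = history.history['loss'][-1]
        if loss <= targetCost:
            print("Target cost {} reached after {} epochs (loss={}).".format(targetCost, epoch + 1, loss))
            break
    return model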
# In[ ]:
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"OUT_WIDTH = 1000\n",
"OUT_HEIGHT = 540"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Include basics\n",
"\n",
"import subprocess\n",
"import numpy as np\n",
"import cv2\n",
"import sys\n",
"import os\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Extract frames\n",
"WIDTH = 1920\n",
"HEIGHT = 1080\n",
"OUT_WIDTH = 1000\n",
"OUT_HEIGHT = 540\n",
"OUTIMG = True\n",
"\n",
"inputFileName = \"feb24/rec_16090_5_split.mp4\"#Video recorded by the phone\n",
"tranFileName = \"feb24/tran_16090_5_split.csv\"#CSV timestamp,groundTruth generated by the PC\n",
"outputFileName = \"feb24/data_16090_5_split.npz\"#'arr0'--->frame recorded by camera, arr1------>label\n",
"\n",
"print((OUT_HEIGHT, OUT_WIDTH))\n",
"\n",
"SYN = 0\n",
"FINE = 0\n",
"\n",
"frameTS = []\n",
"output = subprocess.Popen(\"ffprobe -v error -show_entries frame=pkt_pts_time -select_streams v -of csv=p=0 \" + inputFileName, shell=True, stdout=subprocess.PIPE).stdout.read()\n",
"stamps = output.decode(\"utf-8\").rstrip().split('\\n')\n",
"for ts in stamps:\n",
" frameTS.append(float(ts))\n",
"print(\"Timestamps loaded\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"times = []\n",
"pat = []\n",
"print(\"Read tran file\")\n",
"with open(tranFileName, 'r') as tranfile:\n",
" line = tranfile.readline().rstrip()\n",
" attrs = line.split(\",\")\n",
" SYN = int(attrs[0])\n",
" FINE = float(attrs[1])\n",
" for line in tranfile:\n",
" tem = np.zeros((56,76), dtype='byte')\n",
" attrs = line.rstrip().split(\",\")\n",
" times.append(float(attrs[0]))\n",
" if attrs[len(attrs)-1] == 'none':\n",
" pat.append([None])\n",
" continue\n",
" for i in range(76):\n",
" for j in range(56):\n",
" tem[j,i] = int(attrs[i*56+j+1])\n",
" if(tem[j,i] > 1) or (tem[j,i] < 0):\n",
" print(\"ERORORORORO\")\n",
" pat.append(tem)\n",
"\n",
"print(\"Num samples: \" + str(len(times)))\n",
"violate = 0\n",
"imgset = []\n",
"tranindex = 1\n",
"index = 0\n",
"x_data = np.zeros((12000, int(OUT_HEIGHT/3), int(OUT_WIDTH/4), 3))\n",
"y_label = np.zeros((12000,9), dtype='byte')\n",
"video = cv2.VideoCapture(inputFileName)\n",
"#video.set(cv2.CAP_PROP_POS_FRAMES, 8990)\n",
"while(video.isOpened()):\n",
" ret, frame = video.read()\n",
" if ret:\n",
" ts = frameTS[index] - frameTS[SYN] + FINE\n",
" dist0 = times[tranindex-1] - times[0]\n",
" dist1 = times[tranindex] - times[0]\n",
" patindex = tranindex - 10\n",
" if ts >= dist0 and ts <= dist1:\n",
" imgset.append(frame)\n",
" elif ts > dist1:\n",
" if len(imgset) <= 2:\n",
" violate = violate + 1\n",
" if OUTIMG == True:\n",
" if (tranindex < 100) or (tranindex > 11800):\n",
" cv2.imwrite(\"tem/\" + str(patindex) + \".jpg\", imgset[len(imgset)-1])\n",
" if (patindex >= 0) and (patindex < 1000):\n",
" x_data[patindex,:,:] = cv2.resize(imgset[len(imgset)-1][HEIGHT-OUT_HEIGHT:HEIGHT, 0:OUT_WIDTH,:], (int(OUT_WIDTH/4),int(OUT_HEIGHT/3)))\n",
" # x_data[patindex,:,:,:] = imgset[len(imgset)-1][HEIGHOUT_HEIGHT/3):HEIGHT, 0:int(OUT_WIDTH/4),:]\n",
" #tem = pat33[tranindex]\n",
" for j in range(9):\n",
" y_label[patindex][j] = (pat[tranindex])[55,j] & 0x01\n",
" #tem = tem >> 1\n",
" if(patindex == 0) or (patindex == 11999):\n",
" cv2.imwrite(str(patindex) + \".jpg\", x_data[patindex,:,:,:])\n",
" print(y_label[patindex])\n",
" tranindex = tranindex + 1\n",
" imgset = [frame]\n",
" print(\"Total: \" + str(patindex) + \", Violate: \" + str(violate))\n",
" if tranindex >= len(times):\n",