# bobbycar/logdata_visualization/logfix.py
# Copy, rename and fix logfiles written by the bobbycar SD logger.
#
# The raw log's first line is a '#TIMESTAMP:<unix-seconds>' comment. Data
# lines whose comma-separated field count differs from the header's are
# dropped; the cleaned file is written to <output>, defaulting to a name
# derived from the log's creation timestamp (YYYYmmdd_HHMMSS.csv).
import numpy as np
from datetime import datetime, timezone
import time
import argparse


def analyze(lines):
    """Classify raw log lines (newline already stripped).

    Returns a tuple:
      comment_mask (np.ndarray[bool]) -- True for '#' comment lines (index 0 forced True),
      ok_mask      (np.ndarray[bool]) -- True where the field count matches the header
                                         (only meaningful for data lines),
      header_size  (int)              -- field count of the first non-comment line,
      n_ok, n_fail (int)              -- counts of well-formed / malformed data lines,
      timestamp    (int)              -- unix time parsed from the first line.
    """
    comment_mask = np.array([x.startswith('#') for x in lines])
    comment_mask[0] = True  # first line is a comment every time (the TIMESTAMP line)
    lines_arr = np.array(lines)
    datalines = lines_arr[~comment_mask]  # lines carrying CSV data
    header = datalines[0]  # header is the first non-comment line
    header_size = len(header.split(','))  # expected element count per data line
    data_sizes = np.array([len(x.split(',')) for x in datalines])
    n_ok = int(np.sum(data_sizes == header_size))
    n_fail = int(np.sum(data_sizes != header_size))
    # Mask over ALL lines: True where the field count matches the header.
    ok_mask = np.array([len(x.split(',')) for x in lines_arr]) == header_size
    timestamp = int(lines[0].split('TIMESTAMP:')[1])  # file-creation time
    return comment_mask, ok_mask, header_size, n_ok, n_fail, timestamp


def main():
    """Command-line entry point: parse args, analyze the log, write the cleaned copy."""
    parser = argparse.ArgumentParser(
        description='Copys, renames and fixes logfiles written by bobbycar sd logger.')
    parser.add_argument('input', type=argparse.FileType('r'))
    parser.add_argument('output', nargs='?', type=argparse.FileType('w'))
    args = parser.parse_args()
    ok = True  # vestigial success flag; never cleared anywhere, kept for compatibility
    inputFilename = args.input.name
    outputFilename = args.output.name if args.output is not None else None
    print("Input Filename: " + str(inputFilename))
    with open(inputFilename, 'r') as reader:
        lines = reader.readlines()
    lines = [x.rstrip("\n") for x in lines]  # remove \n
    commentlines, linesOK, headerSize, nOK, nFail, timestamp = analyze(lines)
    print("Found " + str(len(lines)) + " lines")
    print(str(np.sum(commentlines)) + " comments")
    print(str(nFail) + " Datalines Failed")
    print(str(nOK) + " Datalines OK")
    print("Header Size is " + str(headerSize))
    filetime = time.strftime('%Y%m%d_%H%M%S', time.localtime(timestamp))
    if outputFilename is None:
        outputFilename = filetime + ".csv"
    print("Timestamp:" + str(timestamp) + " -> " + str(filetime))
    # tz-aware replacement for the deprecated datetime.utcfromtimestamp();
    # the formatted string is identical.
    print("UTC: " + datetime.fromtimestamp(timestamp, tz=timezone.utc).strftime('%A, %Y-%m-%d %H:%M:%S'))
    print("Local Time:" + time.strftime('%A, %Y-%m-%d %H:%M:%S', time.localtime(timestamp)))
    print("Writing to: " + str(outputFilename))
    linesWritten = 0
    if ok:
        with open(outputFilename, 'w') as writer:
            for i, line in enumerate(lines):
                # Keep comments and well-formed data lines; line 0 (the
                # TIMESTAMP comment) is always dropped, since the creation
                # time is now encoded in the output filename.
                if i != 0 and (commentlines[i] or linesOK[i]):
                    writer.write(line + "\n")
                    linesWritten += 1
                else:
                    print("Skipped " + str(i) + ": " + str(line))
        print(str(linesWritten) + " lines written to " + str(outputFilename))
    else:
        print("Failed!")


if __name__ == "__main__":
    main()