#!/usr/bin/python3
"""Gesture data container: parallel normalization, discretization and
word extraction for the four coordinate series (w, x, y, z) of a gesture."""
import parser
from series_util import discretize_series, extract_series
from multiprocessing import Pool, Manager

import pandas as pd


class Gesture:
    """Maintain data relative to one gesture: its source files, the parser
    used to normalize them, and the derived normalized / discretized /
    word-set representations."""

    def __init__(self, gestureId, parser, files):
        """Store identity, parser and per-coordinate file lists.

        gestureId -- identifier of this gesture
        parser    -- object exposing a normalize(file) callable
        files     -- indexable by 0..3, one file list per coordinate
                     (presumably w, x, y, z in that order — TODO confirm)
        """
        manager = Manager()
        # NOTE(review): this Pool is never close()d/join()ed; consider adding
        # a shutdown method or context-manager support for long-lived use.
        self.pool = Pool()
        self.id = gestureId
        self.parser = parser
        self.files = files
        # Manager dicts to allow concurrent update from worker processes.
        self.norm = manager.dict()
        self.discretized = manager.dict()
        self.wordSet = manager.dict()

    def normalize_files(self):
        """Normalize every file of each coordinate in parallel (Pool.map)."""
        results = [self.pool.map(self.parser.normalize, self.files[i])
                   for i in range(4)]
        # NOTE(review): this rebinds self.norm from the Manager dict created
        # in __init__ to a plain dict, so it is no longer process-shared.
        self.norm = dict(zip(("w", "x", "y", "z"), results))

    def print_normalized(self):
        """Print the normalized data, computing it first if not yet done."""
        if not self.norm:
            self.normalize_files()
        print(self.norm)

    # TODO improve complexity
    def discretize(self, gaussianInterval):
        """Discretize every normalized series using gaussianInterval.

        For each coordinate, builds one DataFrame of discretized rows per
        normalized DataFrame and stores the list in self.discretized[coord].
        """
        for coord in self.norm:  # for each of w, x, y, z
            frameList = []
            for frame in self.norm[coord]:  # each DataFrame of the coordinate
                discFrame = [
                    # to_numpy() replaces DataFrame.as_matrix(), which was
                    # deprecated in pandas 0.23 and removed in pandas 1.0.
                    discretize_series(extract_series(frame.to_numpy(), i),
                                      gaussianInterval)
                    for i in range(frame.shape[0])  # each row (series)
                ]
                frameList.append(pd.DataFrame(discFrame))
            self.discretized[coord] = frameList

    # TODO improve complexity
    def get_words(self, window, step):
        """Extract the word representation of every discretized series.

        NOTE(review): get_gesture_words is neither imported nor defined in
        this file, and the window/step parameters are unused here — confirm
        whether they should be forwarded to get_gesture_words.
        """
        for coord in self.norm:  # for each of w, x, y, z
            wordFrameList = []
            for frame in self.discretized[coord]:
                wordFrame = [
                    get_gesture_words(extract_series(frame.to_numpy(), i))
                    for i in range(frame.shape[0])  # each row (series)
                ]
                # Fix: original wrapped an undefined name `wordList`
                # (NameError at runtime); `wordFrame` is the list built above.
                wordFrameList.append(pd.DataFrame(wordFrame))
            self.wordSet[coord] = wordFrameList