Some changes to allow training with Kaggle data

This commit is contained in:
2023-04-13 14:55:16 +00:00
parent c49645d7bc
commit 7c973f1b88
13 changed files with 1933 additions and 102 deletions

View File

@@ -61,20 +61,25 @@ def map_blazepose_keypoint(column):
return f"{mapped}_{hand}{suffix}"
def map_blazepose_df(df):
def map_blazepose_df(df, rename=True):
to_drop = []
if rename:
renamings = {}
for column in df.columns:
mapped_column = map_blazepose_keypoint(column)
if mapped_column:
renamings[column] = mapped_column
else:
to_drop.append(column)
df = df.rename(columns=renamings)
for index, row in df.iterrows():
sequence_size = len(row["leftEar_Y"])
lsx = row["leftShoulder_X"]
rsx = row["rightShoulder_X"]
lsy = row["leftShoulder_Y"]
rsy = row["rightShoulder_Y"]
# convert all to list
lsx = lsx[1:-1].split(",")
rsx = rsx[1:-1].split(",")
lsy = lsy[1:-1].split(",")
rsy = rsy[1:-1].split(",")
sequence_size = len(lsx)
neck_x = []
neck_y = []
# Treat each element of the sequence (analyzed frame) individually
@@ -84,4 +89,5 @@ def map_blazepose_df(df):
df.loc[index, "neck_X"] = str(neck_x)
df.loc[index, "neck_Y"] = str(neck_y)
return df
df.drop(columns=to_drop, inplace=True)
return df

View File

@@ -5,30 +5,30 @@ import pandas as pd
from normalization.hand_normalization import normalize_hands_full
from normalization.body_normalization import normalize_body_full
DATASET_PATH = './data/wlasl'
DATASET_PATH = './data/processed'
# Load the dataset
df = pd.read_csv(os.path.join(DATASET_PATH, "WLASL100_train.csv"), encoding="utf-8")
df = pd.read_csv(os.path.join(DATASET_PATH, "spoter_train.csv"), encoding="utf-8")
print(df.head())
print(df.columns)
# Retrieve metadata
video_size_heights = df["video_height"].to_list()
video_size_widths = df["video_width"].to_list()
# video_size_heights = df["video_height"].to_list()
# video_size_widths = df["video_width"].to_list()
# Delete redundant (non-related) properties
del df["video_height"]
del df["video_width"]
# del df["video_height"]
# del df["video_width"]
# Temporarily remove other relevant metadata
labels = df["labels"].to_list()
video_fps = df["fps"].to_list()
signs = df["sign"].to_list()
del df["labels"]
del df["fps"]
del df["split"]
del df["video_id"]
del df["label_name"]
del df["length"]
del df["sign"]
del df["path"]
del df["participant_id"]
del df["sequence_id"]
# Convert the strings into lists
@@ -41,7 +41,7 @@ for column in df.columns:
# Perform the normalizations
df = normalize_hands_full(df)
df, invalid_row_indexes = normalize_body_full(df)
# df, invalid_row_indexes = normalize_body_full(df)
# Clear lists of items from deleted rows
# labels = [t for i, t in enumerate(labels) if i not in invalid_row_indexes]
@@ -49,6 +49,6 @@ df, invalid_row_indexes = normalize_body_full(df)
# Return the metadata back to the dataset
df["labels"] = labels
df["fps"] = video_fps
df["sign"] = signs
df.to_csv(os.path.join(DATASET_PATH, "wlasl_train_norm.csv"), encoding="utf-8", index=False)
df.to_csv(os.path.join(DATASET_PATH, "spoter_train_norm.csv"), encoding="utf-8", index=False)