RNN-for-Human-Activity-Recognition-using-2D-Pose-Input
Error while predicting: "Input to reshape is a tensor with 1152 values, but the requested shape has 1179648"
I ported this project to TF 2.0 and normalized the X_train and X_test data before saving my model. Then I tried to run predictions on the normalized X_val file, but I get this error for pred = model.predict(valX):
Input to reshape is a tensor with 1152 values, but the requested shape has 1179648
[[node sequential/dense/Tensordot/Reshape (defined at C:\Users\Bahadir\source\repos\TF_to_Keras\TF_to_Keras\inference.py:90) ]] [Op:__inference_predict_function_2265]
Function call stack:
predict_function
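For context, the two numbers in the error factor cleanly against the training configuration further down (BS = 1024, n_steps = 32); n_input = 36 is my assumption from 1179648 / (1024 * 32), not something the error states directly:

# Hedged arithmetic only: BS and n_steps come from the training script below,
# n_input = 36 is inferred, not confirmed.
BS, n_steps, n_input = 1024, 32, 36
print(BS * n_steps * n_input)  # 1179648 -> the shape the saved graph requests
print(n_steps * n_input)       # 1152    -> exactly one window's worth of values

So it looks as if the saved model still expects batches of exactly 1024 windows.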
The code for normalizing data:
import numpy as np
import sys
from sklearn import preprocessing

np.set_printoptions(suppress=True, formatter={'float_kind': '{:f}'.format})

dataset = "database"
X_train_path = dataset + "\X_train.txt"
X_test_path = dataset + "\X_test.txt"
X_val_path = dataset + "\X_val.txt"

n_steps = 32

def load_X(X_path):
    file = open(X_path, 'r')
    X_ = np.array(
        [elem for elem in [
            row.split(',') for row in file
        ]],
        dtype=np.float32
    )
    file.close()
    # regroup the flat rows into sequences of n_steps timesteps each
    blocks = int(len(X_) / n_steps)
    X_ = np.array(np.split(X_, blocks))
    return X_

def normalize(X_):
    # flatten each sequence, max-normalize it, then restore the 3D shape
    nsamples, nx, ny = X_.shape
    X_ = X_.reshape((nsamples, nx * ny))
    X_ = np.array(preprocessing.normalize(X_, norm='max', copy=False), dtype=np.float32)
    X_ = X_.reshape((nsamples, nx, ny))
    return X_

def write_to_txt(X_, filedir):
    file = open(filedir, "w")
    for row in X_:
        np.savetxt(file, row, fmt='%.25f')  # writes space-separated values
    file.close()

trainX = load_X(X_train_path)
trainX = normalize(trainX)
write_to_txt(trainX, "database_normalized/X_train.txt")

testX = load_X(X_test_path)
testX = normalize(testX)
write_to_txt(testX, "database_normalized/X_test.txt")

valX = load_X(X_val_path)
valX = normalize(valX)
write_to_txt(valX, "database_normalized/X_val.txt")
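As a quick sanity check (not part of my original script), the saved text should round-trip, since np.savetxt writes space-separated rows:

# Optional round-trip check: reload the normalized training file and compare.
reloaded = np.loadtxt("database_normalized/X_train.txt", dtype=np.float32)
assert np.allclose(reloaded.reshape(trainX.shape), trainX)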
This is the model:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras import regularizers

class LSTM_RNN:
    @staticmethod
    def build(n_hidden, n_input, n_steps, n_classes, batch_size, lambda_loss_amount):
        # initialize the model; batch_input_shape fixes the batch dimension
        # to batch_size in the compiled graph
        model = Sequential()
        model.add(Dense(n_hidden, activation="relu",
                        kernel_initializer="random_normal",
                        bias_initializer="random_normal",
                        batch_input_shape=(batch_size, n_steps, n_input)))
        model.add(LSTM(n_hidden, return_sequences=True, unit_forget_bias=1.0))
        model.add(LSTM(n_hidden, unit_forget_bias=1.0))
        model.add(Dense(n_classes, activation="softmax",
                        kernel_initializer="random_normal",
                        bias_initializer="random_normal",
                        kernel_regularizer=regularizers.l2(lambda_loss_amount),
                        bias_regularizer=regularizers.l2(lambda_loss_amount)))
        return model
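For comparison only (a sketch, not the repo's code): batch_input_shape=(batch_size, n_steps, n_input) freezes the batch dimension at 1024 in the saved graph, whereas input_shape=(n_steps, n_input) leaves it as None, so predict() would accept any number of windows. build_flexible is a hypothetical name:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras import regularizers

def build_flexible(n_hidden, n_input, n_steps, n_classes, lambda_loss_amount):
    # identical stack, but input_shape leaves the batch dimension as None,
    # so the saved model can predict on any number of windows
    model = Sequential()
    model.add(Dense(n_hidden, activation="relu",
                    kernel_initializer="random_normal",
                    bias_initializer="random_normal",
                    input_shape=(n_steps, n_input)))
    model.add(LSTM(n_hidden, return_sequences=True, unit_forget_bias=1.0))
    model.add(LSTM(n_hidden, unit_forget_bias=1.0))
    model.add(Dense(n_classes, activation="softmax",
                    kernel_initializer="random_normal",
                    bias_initializer="random_normal",
                    kernel_regularizer=regularizers.l2(lambda_loss_amount),
                    bias_regularizer=regularizers.l2(lambda_loss_amount)))
    return model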
This is the main code:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import save_model
from sklearn.model_selection import train_test_split
from sklearn import metrics
from bahadir.LSTM_RNN import LSTM_RNN
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
import cv2
import time
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", default="database_normalized", help="path to input dataset")
args = vars(ap.parse_args())
EPOCHS = 150
BS = 1024
learning_rate = 0.002
decay_rate = 0.02
lambda_loss_amount = 0.0015
# initialize the data and labels
print("[INFO] loading data...")
labels = [
"JUMPING",
"JUMPING_JACKS",
"BOXING",
"WAVING_2HANDS",
"WAVING_1HAND",
"CLAPPING_HANDS"
]
X_train_path = args["dataset"] + "\X_train.txt"
X_test_path = args["dataset"] + "\X_test.txt"
y_train_path = args["dataset"] + "\Y_train.txt"
y_test_path = args["dataset"] + "\Y_test.txt"
n_steps = 32
def load_X(X_path):
    file = open(X_path, 'r')
    X_ = np.array(
        [elem for elem in [
            row.split(' ') for row in file  # comma replaced by space because the normalized data are space-separated
        ]],
        dtype=np.float32
    )
    file.close()
    blocks = int(len(X_) / n_steps)
    X_ = np.array(np.split(X_, blocks))
    return X_

def load_y(y_path):
    file = open(y_path, 'r')
    y_ = np.array(
        [elem for elem in [
            row.replace('  ', ' ').strip().split(' ') for row in file
        ]],
        dtype=np.int32
    )
    file.close()
    # for 0-based indexing
    return y_ - 1
trainX = load_X(X_train_path)
testX = load_X(X_test_path)
trainY = load_y(y_train_path)
testY = load_y(y_test_path)
training_data_count = len(trainX)  # 4519 training series (with 50% overlap between consecutive series)
test_data_count = len(testX)  # 1197 test series
n_input = len(trainX[0][0]) # num input parameters per timestep
n_hidden = 34 # Hidden layer num of features
n_classes = 6
print("(X shape, y shape, every X's mean, every X's standard deviation)")
print(trainX.shape, testY.shape, np.mean(testX), np.std(testX))
# initialize the model
print("[INFO] compiling model...")
model = LSTM_RNN.build(n_hidden=n_hidden, n_input=n_input, n_steps=n_steps, n_classes=n_classes, batch_size=BS, lambda_loss_amount=lambda_loss_amount)
opt = Adam(lr=learning_rate, decay=decay_rate)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# train the network
print("[INFO] training network...")
y_train_one_hot = to_categorical(trainY, 6)
y_test_one_hot = to_categorical(testY, 6)
train_size = trainX.shape[0] - trainX.shape[0] % BS
test_size = testX.shape[0] - testX.shape[0] % BS
H = model.fit(trainX[:train_size,:,:], y_train_one_hot[:train_size,:], batch_size=BS, epochs=EPOCHS, validation_data=(testX[:test_size,:,:], y_test_one_hot[:test_size,:]))
model.save("model_normalized_.h5", save_format="h5")
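Note that train_size and test_size are trimmed to exact multiples of BS because the fixed batch_input_shape rejects partial batches; if that reading is right, the saved model's predict() is subject to the same constraint.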
And finally, how I try to predict:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import save_model, load_model
from tensorflow.keras.utils import to_categorical
import numpy as np
import argparse
import random
import cv2
import time
import os
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", default="database_normalized", help="path to input dataset")
args = vars(ap.parse_args())
X_train_path = args["dataset"] + "\X_train.txt"
X_test_path = args["dataset"] + "\X_test.txt"
X_val_path = args["dataset"] + "\X_val.txt"
y_train_path = args["dataset"] + "\Y_train.txt"
y_test_path = args["dataset"] + "\Y_test.txt"
n_steps = 32
BS=32
def load_X(X_path):
    file = open(X_path, 'r')
    X_ = np.array(
        [elem for elem in [
            row.split(' ') for row in file
        ]],
        dtype=np.float32
    )
    file.close()
    blocks = int(len(X_) / n_steps)
    X_ = np.array(np.split(X_, blocks))
    return X_

def load_y(y_path):
    file = open(y_path, 'r')
    y_ = np.array(
        [elem for elem in [
            row.replace('  ', ' ').strip().split(' ') for row in file
        ]],
        dtype=np.int32
    )
    file.close()
    # for 0-based indexing
    return y_ - 1
trainX = load_X(X_train_path)
testX = load_X(X_test_path)
valX = load_X(X_val_path)
trainY = load_y(y_train_path)
testY = load_y(y_test_path)
testY_one_hot = to_categorical(testY, 6)
train_size = trainX.shape[0] - trainX.shape[0] % BS
test_size = testX.shape[0] - testX.shape[0] % BS
val_size = valX.shape[0] - valX.shape[0] % BS
model = load_model("model_normalized_.h5")
model.evaluate(testX[:test_size,:,:], testY_one_hot[:test_size,:]) # just for checking
pred = model.predict(valX)
#pred = model.predict(valX[:val_size,:,:])
print(pred)
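In case it helps, two workarounds I would try, assuming the fixed batch dimension of 1024 really is the cause (untested sketches, not confirmed fixes):

# Option 1: pad valX up to a multiple of the baked-in training batch size,
# predict with exactly that batch size, then drop the padded rows.
TRAIN_BS = 1024  # BS from the training script
n_real = valX.shape[0]
pad = (-n_real) % TRAIN_BS
valX_padded = np.concatenate(
    [valX, np.zeros((pad,) + valX.shape[1:], dtype=valX.dtype)]
)
pred = model.predict(valX_padded, batch_size=TRAIN_BS)[:n_real]
print(pred)

# Option 2 (hypothetical): rebuild the same architecture with
# input_shape=(n_steps, n_input) instead of batch_input_shape, e.g. the
# build_flexible sketch above, then copy the trained weights across:
#   flexible = build_flexible(n_hidden=34, n_input=36, n_steps=32,
#                             n_classes=6, lambda_loss_amount=0.0015)
#   flexible.set_weights(model.get_weights())
#   pred = flexible.predict(valX)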
Same here, did you solve this issue?
filepath = './saved_model/model.h5'
# Load the model
new_model = load_model(filepath, compile = True)
X_val_path = DATASET_PATH + "X_val.txt"
X_val = load_X(X_val_path)
# Generate predictions for samples
predictions = new_model.predict(X_val)
print(predictions)
Error:
Input to reshape is a tensor with 1152 values, but the requested shape has 1179648
No, I couldn't solve it.
I'm hitting the same problem here. Have you resolved it?