face_detection_in_realtime
face_detection_in_realtime copied to clipboard
在imagenet数据集上从头开始训练时,loss=7左右就非常难收敛,且accu非常低,请问有什么建议吗?
由于imagenet数据集太大,我只采用了它的val set进行训练,第七个epoch就非常难训练下去了,loss=7左右且accu非常低,结果非常差,请问是我的训练代码有问题吗?
from keras.preprocessing import image as image_utils from keras.applications.imagenet_utils import decode_predictions from keras.applications.imagenet_utils import preprocess_input
from keras.layers import Input from keras.models import Model import numpy as np from keras.utils import np_utils from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from keras.optimizers import Adam, SGD from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint,ReduceLROnPlateau from shufflenetv2 import ShuffleNetV2
def get_data2(data, i, shape=(320, 320)):
    """Load the i-th sample from a list of "image_path class_id" lines.

    data: list of strings, each "path label" (whitespace separated).
    i: index of the line to load.
    shape: target (height, width) the image is resized to.

    Returns a tuple (image_array, one_hot_label) with the label one-hot
    encoded over 1000 ImageNet classes and the pixels rescaled by
    preprocess_input in 'tf' mode (to the [-1, 1] range).
    """
    tokens = data[i].split()
    one_hot = np_utils.to_categorical(tokens[1], 1000)
    img = image_utils.load_img(tokens[0], target_size=shape)
    img = image_utils.img_to_array(img)
    img = preprocess_input(img, mode='tf')
    return img, one_hot
def get_test_data():
    """Read every "path label" line of test.txt and return the whole set
    as two stacked numpy arrays (images, one-hot labels)."""
    lines = open('test.txt').readlines()
    images = []
    labels = []
    for idx, _ in enumerate(lines):
        img, lab = get_data2(lines, idx)
        images.append(img)
        labels.append(lab)
    return np.array(images), np.array(labels)
def generate_arrays_from_file(file_name, batch_size):
    """Yield (X, Y) batches forever from a "path label" listing file.

    file_name: text file with one "image_path class_id" sample per line.
    batch_size: number of samples per yielded batch.

    Yields tuples of numpy arrays (images, one_hot_labels), suitable for
    fit_generator. Lines are served in file order (no shuffling). A
    trailing partial batch is dropped at the end of each pass so every
    pass starts with a fresh batch — the original accumulated the
    remainder (and its counter) across passes, leaking the tail of one
    pass into the first batch of the next.
    """
    while 1:
        # Re-read the listing each pass; 'with' closes the handle the
        # original left open.
        with open(file_name) as f:
            data = f.readlines()
        # Reset the accumulators every pass (bug fix: they used to persist).
        X = []
        Y = []
        for i in range(len(data)):
            x, y = get_data2(data, i)
            X.append(x)
            Y.append(y)
            if len(X) == batch_size:
                yield (np.array(X), np.array(Y))
                X = []
                Y = []
def main():
    """Train ShuffleNetV2 (320x320 RGB, 1000 classes) from scratch on the
    listing in val.txt, validating on the in-memory samples from test.txt.

    Side effects: creates shufflenet_log/111/, writes TensorBoard logs and
    periodic best-val_loss weight checkpoints there.
    """
    import os

    input_shape = (320, 320, 3)
    model = ShuffleNetV2(include_top=True, input_shape=input_shape,
                         bottleneck_ratio=1)

    log_dir = 'shufflenet_log/111/'
    # Bug fix: ModelCheckpoint does not create its target directory, so the
    # first periodic save used to crash mid-training if it did not exist.
    os.makedirs(log_dir, exist_ok=True)

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                  patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0,
                                   patience=10, verbose=1)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=1e-3),
                  metrics=['accuracy'])

    batch_size = 16
    X_test, y_test = get_test_data()

    # NOTE(review): validation_steps only applies when validation_data is a
    # generator; with an in-memory tuple it is ignored (or rejected) by some
    # Keras versions — confirm against the installed release.
    model.fit_generator(
        generator=generate_arrays_from_file('val.txt', batch_size),
        steps_per_epoch=max(1, 49500 // batch_size),
        epochs=60,
        initial_epoch=0,
        validation_data=(X_test, y_test),
        validation_steps=max(1, 500 // batch_size),
        callbacks=[logging, checkpoint, reduce_lr, early_stopping])
# Bug fix: the underscores were stripped (likely by markdown rendering);
# `if name == 'main':` references an undefined `name` and raises NameError.
if __name__ == '__main__':
    main()
请问你有训练代码吗