WizardLM
resume_from_checkpoint
resume_from_checkpoint doesn't work as expected.
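One workaround is to checkpoint manually with plain PyTorch and restart the training loop from the saved epoch. A minimal sketch follows, assuming model, train_loader, and num_epochs are already defined by the training script: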
import torch

# model, train_loader, and num_epochs are assumed to be defined elsewhere in the script
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Restore the model weights, optimizer state, and last completed epoch
checkpoint = torch.load("checkpoint.pt")
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
start_epoch = checkpoint["epoch"] + 1  # continue after the last saved epoch

for i in range(start_epoch, num_epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        # Train the model on this batch
        ...

    # Save a checkpoint after each epoch
    checkpoint = {
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "epoch": i,
    }
    torch.save(checkpoint, "checkpoint.pt")
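If the training script goes through the Hugging Face Trainer (an assumption here, since the original post doesn't show the script), note that resume_from_checkpoint only works with a checkpoint directory the Trainer itself wrote (e.g. checkpoint-500, containing trainer_state.json), not with a hand-rolled .pt file. A sketch of how it is usually passed, with illustrative paths:

from transformers import Trainer, TrainingArguments

# output_dir and the checkpoint path below are illustrative placeholders
args = TrainingArguments(output_dir="output", num_train_epochs=3, save_strategy="epoch")
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)

# Resume from the most recent checkpoint under output_dir ...
trainer.train(resume_from_checkpoint=True)
# ... or from a specific Trainer-written checkpoint directory:
# trainer.train(resume_from_checkpoint="output/checkpoint-500")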