PaddleX
TypeError when reproducing pix2pix: 'paddle.fluid.core_avx.CPUPlace' object is not callable
Checklist:
- Searched related historical issues for an answer
- Read through the FAQ and common Q&A
- Confirmed the bug is still unfixed in the latest version
- Read through the PaddleX API documentation
Describe the problem
```
Traceback (most recent call last):
  File "D:\Users\urad\PycharmProjects\pythonProject\trainer.py", line 128, in <module>
TypeError: 'paddle.fluid.core_avx.CPUPlace' object is not callable

Process finished with exit code 1
```
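For context: this exact TypeError is what Python raises when a non-callable object gets called. Since the traceback above is truncated, the root cause is not confirmed; one plausible trigger (an assumption, not the user's verified code path) is calling an already-constructed place object a second time. A minimal sketch that reproduces the message:

```python
# Hypothetical reproduction of the reported error, NOT the confirmed cause.
import paddle

place = paddle.CPUPlace()  # CPUPlace is a class; calling it returns a place instance
place()                    # calling the *instance* again raises:
                           # TypeError: 'paddle.fluid.core_avx.CPUPlace' object is not callable
```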
Reproduction
- Have you successfully run the tutorial we provide? Yes
- Have you modified the code on top of the tutorial? If so, please provide the code you ran. Yes

trainer.py:

```python
import matplotlib.pyplot as plt
from data_reader import data_reader
from model import GAN_UNet
import numpy as np
import time
import paddle


class CONFIG:
    def __init__(self):
        self.data_dir = 'data/data10830'
        self.dataset = 'cityscapes/cityscapes'
        self.model_net = 'Pix2pix'
        self.train_list = 'data/data10830/cityscapes/pix2pix_train_list'
        self.image_size = 256
        self.crop_size = 224
        self.crop_type = 'Random'
        self.shuffle = True
        self.drop_last = False
        self.run_test = False
        self.batch_size = 1


def show_picture(picture):
    plt.figure(figsize=(len(picture) * 4, 4), dpi=80)
    for i in range(len(picture)):
        plt.subplot(1, len(picture), i + 1)
        picture[i] = (picture[i][0].transpose((1, 2, 0)) + 1) / 2
        plt.imshow(picture[i])


def train(input_data, epoch_num=1000, use_cuda=False, l1=100, path='./model/', step_num=10, print_interval=1):
    """
    :param input_data: data loader
    :param epoch_num: number of training epochs
    :param use_cuda: whether to use the GPU
    :param l1: weight of the L1 loss term
    :param path: model save path
    :param step_num: number of gradient steps to run
    :param print_interval: interval (in batches) between log prints
    :return:
    """
    dis = GAN_UNet.Discriminator()
    gen = GAN_UNet.Generator()
    place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
    with paddle.utils.unique_name.guard(place):
        train_reader = input_data.make_data()[0]
        ones, zeros = '', ''
        gen_optimizer = paddle.optimizer.Adam(learning_rate=0.0002, beta1=0.5, beta2=0.999,
                                              parameters=gen.parameters())
        dis_optimizer = paddle.optimizer.Adam(learning_rate=0.0002, beta1=0.5, beta2=0.999,
                                              parameters=dis.parameters())
        print('Begin time :', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        first_iteration = True
        for epoch in range(epoch_num):
            for batch, data in enumerate(train_reader()):
                a_positive = paddle.to_tensor(np.array(data[1]))
                b_positive = paddle.to_tensor(np.array(data[0]))
                # create real input
                a_positive_b_positive = paddle.concat((a_positive, b_positive), axis=1)
                # create fake input
                b_negative = gen(a_positive)
                a_positive_b_negative = paddle.concat((a_positive, b_negative), axis=1)
                # Discriminator output
                pre_positive = dis(a_positive_b_positive)
                pre_negative = dis(a_positive_b_negative)
                if first_iteration:
                    first_iteration = False
                    ones = paddle.to_tensor(np.ones(pre_positive.shape, 'float32'))
                    zeros = paddle.to_tensor(np.zeros(pre_positive.shape, 'float32'))
                # Discriminator loss function
                dis_loss_positive = paddle.nn.functional.binary_cross_entropy(pre_positive, ones)
                dis_loss_positive = paddle.mean(dis_loss_positive)
                dis_loss_negative = paddle.nn.functional.binary_cross_entropy(pre_negative, zeros)
                dis_loss_negative = paddle.mean(dis_loss_negative)
                dis_loss = dis_loss_positive + dis_loss_negative
                dis_loss.backward()
                dis_optimizer.minimize(dis_loss)
                dis.clear_gradients()
                # Generator loss function: adversarial + L1 loss
                a_positive = paddle.to_tensor(np.array(data[1]))
                b_negative = gen(a_positive)
                pic_negative = b_negative.numpy()
                a_positive_b_negative = paddle.concat((a_positive, b_negative), axis=1)
                pre_negative = dis(a_positive_b_negative)
                gen_loss_negative = paddle.nn.functional.binary_cross_entropy(pre_negative, ones)
                gen_loss_negative = paddle.mean(gen_loss_negative)
                # L1 distance
                gen_loss_l1 = paddle.mean(paddle.abs(b_positive - b_negative))
                gen_loss = gen_loss_negative + gen_loss_l1 * l1
                gen_loss.backward()
                gen_optimizer.minimize(gen_loss)
                gen.clear_gradients()
                # print train information
                if batch % print_interval == 0:
                    print('epoch:', epoch, ', batch:', batch,
                          ', Discriminator positive loss:', dis_loss_positive.numpy(),
                          ', Discriminator negative loss:', dis_loss_negative.numpy(),
                          ', Generator negative loss:', gen_loss_negative.numpy(),
                          ', Generator l1 loss:', gen_loss_l1.numpy())
                    # show the generated pictures
                    show_picture([data[1], data[0], pic_negative])
                # save the model every 10000 batches, or once the configured step count is reached
                if batch % 10000 == 0 and batch != 0:
                    paddle.save(gen.state_dict(), path + 'gen')
                    paddle.save(gen_optimizer.state_dict(), path + 'gen')
                    paddle.save(dis.state_dict(), path + 'dis')
                    paddle.save(dis_optimizer.state_dict(), path + 'dis')
                    print('SaveModel :', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), batch)
                if batch + 1 >= step_num:
                    paddle.save(gen.state_dict(), path + 'gen')
                    paddle.save(gen_optimizer.state_dict(), path + 'gen')
                    paddle.save(dis.state_dict(), path + 'dis')
                    paddle.save(dis_optimizer.state_dict(), path + 'dis')
                    print('Finish time :', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                    return


cfg = CONFIG()
input_data = data_reader(cfg)
train(input_data, use_cuda=False, step_num=1)
```
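One thing worth flagging in trainer.py (an observation, not a confirmed root cause): `paddle.utils.unique_name.guard` is a name-scoping utility and does not take a place object, so the `with ... guard(place)` line looks like a leftover from the Paddle 1.x `fluid.dygraph.guard(place)` API. In Paddle 2.x dynamic-graph mode the device is normally selected with `paddle.set_device`; a minimal sketch, assuming Paddle >= 2.0:

```python
import paddle

# Pick the device once, globally; 'gpu:0' and 'cpu' are the usual identifiers.
use_cuda = False  # mirrors the train() flag above
paddle.set_device('gpu:0' if use_cuda else 'cpu')

# Tensors and layers created after this call live on the selected device,
# so no with-guard block is needed in 2.x dygraph code.
x = paddle.ones([1, 3])
print(x.place)
```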
GAN_UNet.py:

```python
import paddle


###########################################################
# Generator
###########################################################
class Encoder(paddle.nn.Layer):
    def __init__(self, in_ch, out_ch):
        super(Encoder, self).__init__()
        self.Conv_BN_ReLU_2 = paddle.nn.Sequential(
            paddle.nn.Conv2D(in_channels=in_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
            paddle.nn.BatchNorm2D(out_ch),
            paddle.nn.ReLU(),
            paddle.nn.Conv2D(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
            paddle.nn.BatchNorm2D(out_ch),
            paddle.nn.ReLU()
        )
        self.downSample = paddle.nn.Sequential(
            paddle.nn.Conv2D(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=2, padding=1),
            paddle.nn.BatchNorm2D(out_ch),
            paddle.nn.ReLU()
        )

    def forward(self, x):
        out = self.Conv_BN_ReLU_2(x)
        out_2 = self.downSample(out)
        return out, out_2


class Decoder(paddle.nn.Layer):
    def __init__(self, in_ch, out_ch):
        super(Decoder, self).__init__()
        self.Conv_BN_ReLU_2 = paddle.nn.Sequential(
            paddle.nn.Conv2D(in_channels=in_ch, out_channels=out_ch * 2, kernel_size=3, stride=1, padding=1),
            paddle.nn.BatchNorm2D(out_ch * 2),
            paddle.nn.ReLU(),
            paddle.nn.Conv2D(in_channels=out_ch * 2, out_channels=out_ch * 2, kernel_size=3, stride=1, padding=1),
            paddle.nn.BatchNorm2D(out_ch * 2),
            paddle.nn.ReLU()
        )
        self.upSample = paddle.nn.Sequential(
            paddle.nn.Conv2DTranspose(in_channels=out_ch * 2, out_channels=out_ch, kernel_size=3, stride=2,
                                      padding=1, output_padding=1),
            paddle.nn.BatchNorm2D(out_ch),
            paddle.nn.ReLU()
        )

    def forward(self, x, out):
        x_out = self.Conv_BN_ReLU_2(x)
        x_out = self.upSample(x_out)
        concat_out = paddle.concat((x_out, out), axis=1)
        return concat_out


class Generator(paddle.nn.Layer):
    def __init__(self):
        super(Generator, self).__init__()
        out_channels = [2 ** (i + 6) for i in range(5)]
        # downsampling path
        self.encode1 = Encoder(3, out_channels[0])
        self.encode2 = Encoder(out_channels[0], out_channels[1])
        self.encode3 = Encoder(out_channels[1], out_channels[2])
        self.encode4 = Encoder(out_channels[2], out_channels[3])
        # upsampling path
        self.decode1 = Decoder(out_channels[3], out_channels[3])
        self.decode2 = Decoder(out_channels[4], out_channels[2])
        self.decode3 = Decoder(out_channels[3], out_channels[1])
        self.decode4 = Decoder(out_channels[2], out_channels[0])
        self.output = paddle.nn.Sequential(
            paddle.nn.Conv2D(out_channels[1], out_channels[0], kernel_size=3, stride=1, padding=1),
            paddle.nn.BatchNorm2D(out_channels[0]),
            paddle.nn.ReLU(),
            paddle.nn.Conv2D(out_channels[0], out_channels[0], kernel_size=3, stride=1, padding=1),
            paddle.nn.BatchNorm2D(out_channels[0]),
            paddle.nn.ReLU(),
            paddle.nn.Conv2D(out_channels[0], 3, 3, 1, 1),
            paddle.nn.Sigmoid(),
        )

    def forward(self, x):
        shortcut1, out1 = self.encode1(x)
        shortcut2, out2 = self.encode2(out1)
        shortcut3, out3 = self.encode3(out2)
        shortcut4, out4 = self.encode4(out3)
        out5 = self.decode1(out4, shortcut4)
        out6 = self.decode2(out5, shortcut3)
        out7 = self.decode3(out6, shortcut2)
        out8 = self.decode4(out7, shortcut1)
        out = self.output(out8)
        return out


###########################################################
# Discriminator
###########################################################
class Discriminator(paddle.nn.Layer):
    def __init__(self, input_channel=3, output_channel=3, filter_num=64):
        super(Discriminator, self).__init__()
        self.layer256 = paddle.nn.Sequential(
            paddle.nn.Conv2D(input_channel + output_channel, filter_num, 4, stride=2, padding=1),
            paddle.nn.LeakyReLU(0.2)
        )
        self.layer128 = paddle.nn.Sequential(
            paddle.nn.Conv2D(filter_num, filter_num * 2, 4, stride=2, padding=1),
            paddle.nn.BatchNorm2D(filter_num * 2),
            paddle.nn.LeakyReLU(0.2)
        )
        self.layer64 = paddle.nn.Sequential(
            paddle.nn.Conv2D(filter_num * 2, filter_num * 4, 4, stride=2, padding=1),
            paddle.nn.BatchNorm2D(filter_num * 4),
            paddle.nn.LeakyReLU(0.2)
        )
        self.layer32 = paddle.nn.Sequential(
            paddle.nn.Conv2D(filter_num * 4, filter_num * 8, 4, stride=1, padding=1),
            paddle.nn.BatchNorm2D(filter_num * 8),
            paddle.nn.LeakyReLU(0.2)
        )
        self.layer31 = paddle.nn.Sequential(
            paddle.nn.Conv2D(filter_num * 8, 1, 4, stride=1, padding=1),
            paddle.nn.Sigmoid()
        )

    def forward(self, X):
        layer256_out = self.layer256(X)
        layer128_out = self.layer128(layer256_out)
        layer64_out = self.layer64(layer128_out)
        layer32_out = self.layer32(layer64_out)
        layer31_out = self.layer31(layer32_out)
        return layer31_out
```
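Independent of the reported error, a quick shape smoke test helps confirm the two networks wire together. A sketch, assuming the module above is importable as `model.GAN_UNet` (as trainer.py does) and using the 224x224 crop size from CONFIG:

```python
import paddle
from model import GAN_UNet

gen = GAN_UNet.Generator()
dis = GAN_UNet.Discriminator()

# One fake 3-channel 224x224 image, matching crop_size in CONFIG.
a = paddle.randn([1, 3, 224, 224])
b = gen(a)
print(b.shape)  # [1, 3, 224, 224] -- the U-Net is shape-preserving

# The discriminator scores the conditional pair (input, output),
# concatenated channel-wise: 3 + 3 = 6 input channels.
score = dis(paddle.concat((a, b), axis=1))
print(score.shape)  # [1, 1, 26, 26] -- a PatchGAN-style map of per-patch scores
```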
- What dataset are you using?
- Please provide the error message and related logs (the traceback is given above).
Environment
- Please provide the PaddlePaddle and PaddleX version numbers you are using.
- Please provide your operating system (Linux/Windows/MacOS): Windows 10
- Which Python version are you using? 3.10
- Which CUDA/cuDNN versions are you using? None (CPU only)
Open a terminal and run the two lines below to check whether the Paddle version you installed is the problem:

```python
import paddle
cpu_place = paddle.CPUPlace()
```
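If those two lines already fail, printing the installed version and running Paddle's built-in installation check usually narrows it down (a suggestion; `paddle.utils.run_check` is available in Paddle 2.x):

```python
import paddle

print(paddle.__version__)  # Python 3.10 requires a fairly recent Paddle release
paddle.utils.run_check()   # reports whether PaddlePaddle is installed correctly
```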
pix2pix is already implemented in PaddleGAN, so you can use it directly and save yourself the development effort: https://github.com/PaddlePaddle/PaddleGAN/blob/develop/docs/en_US/tutorials/pix2pix_cyclegan.md