
LinfPGD does not work

Open AlbertZhangHIT opened this issue 4 years ago • 4 comments

Hi, I'm trying to test the LinfPGD attack on the ImageNet validation set with a PyTorch ResNet18. But it seems the attack is not thoroughly successful, because the robust accuracy does not drop much.

Here is my snippet:

import torch
import torchvision
from torchvision import datasets, models, transforms
import os
from tqdm import tqdm
import foolbox
from foolbox import accuracy
import numpy as np

if __name__ == "__main__":

	def classPredict(y, label):
		_, predict = torch.max(y.data, 1)
		return predict.eq(label.data).sum().item()

	class AverageMeter(object):
		"""Computes and stores the average and current values
		"""
		def __init__(self):
			self.reset()

		def reset(self):
			self.val = 0
			self.avg = 0
			self.sum = 0
			self.count = 0

		def update(self, val, n=1):
			self.val = val
			self.sum += val
			self.count += n
			self.avg = self.sum/self.count	

	data_dir = "./ImageNet"

	imgset = datasets.ImageNet(root=data_dir, split='val', download=False, 
			transform=transforms.Compose([
						transforms.Resize([224, 224]),
						transforms.ToTensor(), 
						transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
				]))

	val_loader = torch.utils.data.DataLoader(imgset, batch_size=8, shuffle=False, num_workers=1)
	total_batches = len(val_loader)
	device = 'cuda:0'

	model = models.resnet18(pretrained=True).to(device).eval()
	preprocessing = None #dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
	fmodel = foolbox.models.PyTorchModel(model, bounds=(0,1), device=device, preprocessing=preprocessing)
	stepsize = 2/255
	epsilon = 8/255
	attack = foolbox.attacks.LinfPGD(rel_stepsize=0.01, abs_stepsize=stepsize, 
									steps=50, random_start=True)
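	# note: when abs_stepsize is set, foolbox uses it and ignores rel_stepsize,
	# so each iteration here steps 2/255 within the Linf ball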

	robust_accuracy = AverageMeter()
	std_accuracy = AverageMeter()

	data_stream = tqdm(enumerate(val_loader), total=total_batches)
	for i, (data, label) in data_stream:

		n = label.size(0)
		data, label = data.to(device), label.to(device)
		# standard original accuracy
		y = fmodel(data)
		correct = classPredict(y, label)
		std_accuracy.update(correct, n)

		# attack returns: raw adversarials, clipped adversarials, success mask
		_, advs, success = attack(fmodel, data, label, epsilons=epsilon)
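		# success[j] is True if the attack found an adversarial within epsilon
		# for sample j, so samples the attack failed on still count as correct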
		correct = n - success.sum().item()
		robust_accuracy.update(correct, n)

		data_stream.set_description(
			('progress: [{trained}/{total}] ({percent:.1f}%) | '
			'standard accuracy: {sacc.avg:.2f} | '
			'robust accuracy: {racc.avg:.2f} | '
			).format(
				trained=i, total=total_batches, percent=(100.*i/total_batches), 
				sacc=std_accuracy, racc=robust_accuracy, 
			)
		)

And the intermediate result:

progress: [968/6250] (15.5%) | standard accuracy: 0.74 | robust accuracy: 0.67 | : : 968it [17:22,  1.08s/it]

Did I do something wrong?

AlbertZhangHIT avatar Sep 03 '20 15:09 AlbertZhangHIT

Were you able to find a solution? That would interest me as well.

mipet23 avatar Dec 24 '20 10:12 mipet23

Have you found a solution? I encountered the same problem when using FGSM attack on CIFAR10 dataset.

louvinci avatar Nov 04 '21 12:11 louvinci

It looks like you normalize all samples as part of the DataLoader:

transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

This means that the model does not operate in the [0, 1] value range but in a very different one. To fix this, remove the Normalize transformation from the DataLoader and do this transformation as part of the model's forward pass instead. This should solve your issue.
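For example, a minimal sketch of this fix (the NormalizedModel wrapper below is just an illustrative helper, not part of foolbox):

import torch
import foolbox
from torchvision import models

model = models.resnet18(pretrained=True).eval()

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

# Option 1: let foolbox normalize internally. The attack then operates in the
# raw [0, 1] space given by `bounds`, and foolbox subtracts the mean and
# divides by the std right before every forward pass of the model.
preprocessing = dict(mean=mean, std=std, axis=-3)
fmodel = foolbox.models.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

# Option 2: bake the normalization into the model's forward pass.
class NormalizedModel(torch.nn.Module):
    def __init__(self, model, mean, std):
        super().__init__()
        self.model = model
        self.register_buffer("mean", torch.tensor(mean).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor(std).view(1, 3, 1, 1))

    def forward(self, x):
        # normalize inside the model, so the attack itself sees [0, 1] inputs
        return self.model((x - self.mean) / self.std)

wrapped = NormalizedModel(model, mean, std).eval()
fmodel = foolbox.models.PyTorchModel(wrapped, bounds=(0, 1))

In both cases the DataLoader should only apply Resize and ToTensor, so the images handed to the attack actually lie in [0, 1] and epsilon = 8/255 keeps its usual meaning.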

zimmerrol avatar Jan 27 '22 13:01 zimmerrol

Hello, could you give me an example?
I don't understand whether 'preprocessing' is applied first or the 'bounds' [0, 1].

In my case, if we use our own image, how should we add the transformation to the model? Thank you!

preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=1))  # [1, 3, 224, 224], batchsize = 1 for the example

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
img = Image.open(args.image_path)
img = img.resize((224, 224))
images = transform(img).unsqueeze(0)  # values like [0.4667, 0.4471, 0.4235, ..., 0.0235, 0.0549, 0.1020]

Changgun-Choi avatar Apr 03 '22 07:04 Changgun-Choi