abstract-art-neural-network
Converting to CUDA (PyTorch)
Friendly greetings!
I'm just a sysadmin who happens to have a server with an absurdly powerful NVIDIA P100. I totally enjoy generative art, but I'm not much of a Python programmer.
I tried my best to convert this notebook to CUDA but, meh, no luck.
This is what I got, but it comes with a warning and still doesn't seem to run on the GPU. Do you think you could convert your CPU code to GPU? :pray: thx <3
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
from matplotlib import colors
import os, copy
from PIL import Image
# In[2]:
print("Cuda is available : ", torch.cuda.is_available())
print("Current device #", torch.cuda.current_device(), ", Name : ", torch.cuda.get_device_name(torch.cuda.current_device()))
print("Current Device Memory allocated : ", torch.cuda.memory_allocated())
# In[3]:
def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight)
class NN(nn.Module):
    def __init__(self, activation=nn.Tanh, num_neurons=16, num_layers=9):
        """
        num_layers must be at least two
        """
        super(NN, self).__init__()
        layers = [nn.Linear(2, num_neurons, bias=True), activation()]
        for _ in range(num_layers - 1):
            layers += [nn.Linear(num_neurons, num_neurons, bias=False), activation()]
        layers += [nn.Linear(num_neurons, 3, bias=False), nn.Sigmoid()]
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)
def gen_new_image(size_x, size_y, save=True, **kwargs):
    net = NN(**kwargs).cuda()
    net.apply(init_normal)
    colors = run_net(net, size_x, size_y)
    plot_colors(colors)
    if save is True:
        save_colors(colors)
    return net, colors
def run_net(net, size_x=128, size_y=128):
    x = torch.arange(0, size_x, 1)
    y = torch.arange(0, size_y, 1)
    colors = torch.zeros((size_x, size_y, 2))
    for i in x:
        for j in y:
            colors[i][j] = torch.tensor([float(i) / size_y - 0.5, float(j) / size_x - 0.5])
    colors = colors.reshape(size_x * size_y, 2)
    #img = net(torch.tensor(colors).type(torch.FloatTensor)).detach().numpy()
    # calling torch.tensor() on an existing tensor is what triggers the copy-construct warning
    img = net(torch.tensor(colors).type(torch.cuda.FloatTensor)).cuda()
    img2 = img.cpu().detach().numpy()
    return img2.reshape(size_x, size_y, 3)
def plot_colors(colors, fig_size=15):
    plt.figure(figsize=(fig_size, fig_size))
    plt.imshow(colors, interpolation='nearest', vmin=0, vmax=1)
def save_colors(colors):
    plt.imsave(str(np.random.randint(100000)) + ".png", colors)
def run_plot_save(net, size_x, size_y, fig_size=15):
    colors = run_net(net, size_x, size_y)
    plot_colors(colors, fig_size)
    save_colors(colors)
# In[4]:
n,c = gen_new_image(1024, 1024, save=False, num_neurons=32)
# In[5]:
run_plot_save(n, 1080, 720)
# Let's see how the images change if we increase the depth
# In[57]:
for num_layers in range(2, 30, 3):
    print(f"{num_layers} layers")
    n, c = gen_new_image(128, 128, save=False, num_layers=num_layers)
# And also the effect of increasing the width
# In[58]:
for i in range(1, 10, 2):
    print(f"{2**i} neurons")
    n, c = gen_new_image(128, 128, save=False, num_neurons=2**i)
# What happens if we use ReLUs?
# In[60]:
n,c = gen_new_image(128, 128, save=False, activation=nn.ReLU)
# In[ ]:
Sorry, I don't have easy CUDA availability. Perhaps someone else can patch it.
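In case it helps, below is a minimal device-agnostic sketch of how run_net and the setup call could be patched. It assumes the NN and init_normal definitions from the notebook above; torch.meshgrid replaces the per-pixel loop, and the device and coords names are just illustrative choices, not part of the original notebook.

import torch

# Pick the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def run_net(net, size_x=128, size_y=128):
    # Build the coordinate grid in one shot instead of filling it pixel by pixel,
    # then move it to the same device as the network.
    xs = torch.arange(size_x, dtype=torch.float32)
    ys = torch.arange(size_y, dtype=torch.float32)
    grid_x, grid_y = torch.meshgrid(xs, ys)  # 'ij' indexing, matching the loop above
    coords = torch.stack([grid_x / size_y - 0.5, grid_y / size_x - 0.5], dim=-1)
    coords = coords.reshape(size_x * size_y, 2).to(device)
    with torch.no_grad():
        img = net(coords)  # runs on the GPU when the net lives there
    return img.cpu().numpy().reshape(size_x, size_y, 3)

# Usage: create the network, move it to the same device, then render.
net = NN(num_neurons=32).to(device)
net.apply(init_normal)
colors = run_net(net, 128, 128)

Moving both the model (net.to(device)) and the input tensor (coords.to(device)) is the key step; the original attempt only converted the input, and torch.tensor() on an existing tensor is what produced the warning.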