Machine-Learning-Collection
custom image testing
Hi, I am using method 1 from tutorial 18 (image dataset in subfolders) for a custom dataset. My code runs perfectly, but I want to know: how can I test my own image (one not included in the dataset) on the model?
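One way to do this, as a minimal sketch: load the image, resize it to the size the model was trained on, add a batch dimension, and call predict. Here model is assumed to be your trained Keras model, and the file path and image size are placeholders for your own setup:

import numpy as np
import tensorflow as tf

img_height, img_width = 100, 100   # placeholder: use the size your training pipeline used

# "my_test_image.png" is a placeholder path to an image that is not in the dataset
img = tf.keras.preprocessing.image.load_img(
    "my_test_image.png",
    target_size=(img_height, img_width),
    color_mode="grayscale",   # or "rgb", depending on how the model was trained
)
img_array = tf.keras.preprocessing.image.img_to_array(img)   # shape (h, w, c)
img_array = np.expand_dims(img_array, axis=0)                # add batch dim -> (1, h, w, c)

predictions = model.predict(img_array)
print("predicted class:", np.argmax(predictions[0]))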
I am also stuck on tutorial 18.
I wanted to use your code with my own dataset. I labeled my dataset by hand (it took forever); I have about 300 pictures of the numbers 0-9 in subfolders, as described in the tutorial.
I followed all the steps, but I am stuck at these lines:
# Custom Loops
for epochs in range(10):
    for x, y in ds_train:
        # train here
        pass
I looked through all your other tutorials but couldn't find where you explain custom loops. In the video you said you had explained it in previous videos, but I couldn't find it.
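For reference, a bare-bones custom training loop in Keras usually looks roughly like the sketch below. This is not the tutorial's exact code; it assumes model and ds_train from the snippet above and a 10-epoch run:

import tensorflow as tf

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()

for epoch in range(10):
    for x, y in ds_train:
        with tf.GradientTape() as tape:
            logits = model(x, training=True)   # forward pass on one batch
            loss = loss_fn(y, logits)          # per-batch loss
        # backpropagate and update the weights
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))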
I tried replacing the fashion dataset in the tutorials with my own, but it's not the same format; load_data() can't be applied to my dataset.
For example, one of the many tutorials uses this to get its data:
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
How could I replace it with mine, so that I end up with (train_images, train_labels), (test_images, test_labels)?
I am completely lost. I kind of made it work, but when I feed the net images it crashes. How can I work with this dataset format? How do I feed my net images now, so it does something useful beyond just training?
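One possible way to get arrays in the same shape as fashion_mnist.load_data() is to iterate over the tf.data.Dataset returned by image_dataset_from_directory and stack its batches. This is only a sketch: dataset_to_arrays is a hypothetical helper, it assumes ds_train and ds_validation as defined in the full script further down, and it assumes the whole dataset fits in memory:

import numpy as np

def dataset_to_arrays(ds):
    """Collect all batches of a tf.data.Dataset into two numpy arrays."""
    images, labels = [], []
    for batch_images, batch_labels in ds:
        images.append(batch_images.numpy())
        labels.append(batch_labels.numpy())
    return np.concatenate(images), np.concatenate(labels)

train_images, train_labels = dataset_to_arrays(ds_train)
test_images, test_labels = dataset_to_arrays(ds_validation)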
Here is the error I am getting:
python3 classifier.py
2.5.0
Found 4403 files belonging to 10 classes.
Using 3963 files for training.
Found 4403 files belonging to 10 classes.
Using 440 files for validation.
layer setup ...
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 100, 100, 5) 85
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 50, 50, 5) 0
_________________________________________________________________
flatten (Flatten) (None, 12500) 0
_________________________________________________________________
dense (Dense) (None, 10) 125010
=================================================================
Total params: 125,095
Trainable params: 125,095
Non-trainable params: 0
_________________________________________________________________
compiling ...
training ...
Epoch 1/3
397/397 - 6s - loss: 44.9943 - accuracy: 0.8365
Epoch 2/3
397/397 - 7s - loss: 3.8356 - accuracy: 0.9463
Epoch 3/3
397/397 - 7s - loss: 3.0261 - accuracy: 0.9632
testing ...
44/44 - 0s - loss: 6.7556 - accuracy: 0.9432
Test accuracy: 0.9431818127632141
dataset/2/2021-04-23_16:30:03.png
Traceback (most recent call last):
File "/run/media/pro/imgscraper/classifier.py", line 147, in <module>
print('prediction:', np.argmax(probability_model.predict(img)))
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 1727, in predict
tmp_batch_outputs = self.predict_function(iterator)
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py", line 889, in __call__
result = self._call(*args, **kwds)
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py", line 924, in _call
results = self._stateful_fn(*args, **kwds)
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py", line 3022, in __call__
filtered_flat_args) = self._maybe_define_function(args, kwargs)
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py", line 3440, in _maybe_define_function
return self._define_function_with_shape_relaxation(
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py", line 3362, in _define_function_with_shape_relaxation
graph_function = self._create_graph_function(
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py", line 3279, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/framework/func_graph.py", line 999, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py", line 672, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/framework/func_graph.py", line 986, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:1569 predict_function *
return step_function(self, iterator)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:1559 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py:1285 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py:2833 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py:3608 _call_for_each_replica
return fn(*args, **kwargs)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:1552 run_step **
outputs = model.predict_step(data)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:1525 predict_step
return self(x, training=False)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/base_layer.py:1013 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
/home/artur/.local/lib/python3.9/site-packages/tensorflow/python/keras/engine/input_spec.py:230 assert_input_compatibility
raise ValueError('Input ' + str(input_index) + ' of layer ' +
ValueError: Input 0 of layer sequential_1 is incompatible with the layer: : expected min_ndim=4, found ndim=3. Full shape received: (None, 100, 3)
Here is the complete file:
import os, sys, cv2
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import random

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)

# load my data (its 100x100 pictures)
h = 100
w = 100
batch_size = 10

# define the training set
ds_train = tf.keras.preprocessing.image_dataset_from_directory(
    'dataset/',
    labels='inferred',
    label_mode="int",
    class_names=["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
    color_mode='grayscale',
    batch_size=batch_size,
    image_size=(h, w),
    shuffle=True,
    seed=123,
    validation_split=0.1,
    subset="training",
)

# define the validation set
ds_validation = tf.keras.preprocessing.image_dataset_from_directory(
    'dataset/',
    labels='inferred',
    label_mode="int",
    class_names=["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
    color_mode='grayscale',
    batch_size=batch_size,
    image_size=(h, w),
    shuffle=True,
    seed=123,
    validation_split=0.1,
    subset="validation",
)

# setup of layers
print('layer setup ...')
model = keras.Sequential([
    layers.InputLayer((w, h, 1)),
    layers.Conv2D(filters=5, kernel_size=(4, 4), strides=(1, 1), padding='same'),
    # layers.Conv2D(16, 3, padding='same'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(10),
])
model.summary()

# compile the model
print('compiling ...')
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# feed the model
print('training ...')
model.fit(ds_train, epochs=10, verbose=2)

# compare to the test data set
print('testing ...')
test_loss, test_acc = model.evaluate(ds_validation, verbose=2)
print('\nTest accuracy:', test_acc)

### i want to feed the net some images and see its prediction ###
# softmax
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
predictions = probability_model.predict(ds_validation)

# load all files
files = list()
path = 'dataset'
for (dirpath, dirnames, filenames) in os.walk(path):
    files += [os.path.join(dirpath, file) for file in filenames]

# randomize it
random.shuffle(files)
for f in files:
    img = cv2.imread(f)
    cv2.imshow('window', img)  # show file to user
    cv2.waitKey(0)
    print(f)
    print('prediction:', np.argmax(probability_model.predict(img)))

# prediction of the first item
# prediction = np.argmax(predictions[0])
# print('prediction of test 0', prediction)
# for i in predictions:
#     print(np.argmax(i))
I got it working with a really weird hack...
img = cv2.imread(f)
orig = img.copy()
# Resize to respect the input_shape
img = cv2.resize(img, (w, h))
# Convert img to GRAY (might be different for your data)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img.reshape(1, w, h, 1)  # this is the magic line that will make it work
print(f, type(img), img.shape)
print()
print('prediction:', np.argmax(probability_model.predict(img)))
print()
cv2.imshow('window', orig)  # show file to user
cv2.waitKey(0)
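For context, that reshape is not really a hack: model.predict expects a batch of images with shape (batch, height, width, channels), while cv2.imread followed by cvtColor yields a single (height, width) array, which is why the error reported ndim=3 instead of the expected min_ndim=4. An equivalent, arguably clearer variant is to add the channel and batch dimensions explicitly; this is just a sketch, reusing the variable names from the script above:

img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)   # read directly as grayscale: shape (h, w)
img = cv2.resize(img, (w, h))               # match the training image size
img = np.expand_dims(img, axis=-1)          # add channel dimension -> (h, w, 1)
img = np.expand_dims(img, axis=0)           # add batch dimension   -> (1, h, w, 1)
print('prediction:', np.argmax(probability_model.predict(img)))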