Too much performance overhead during debugging
When I was debugging inside a DataLoader with this plugin, it caused so much performance overhead that the program could not continue at all.
When I am debugging, the plugin always shows this:
Yeah... it probably happens because the variable was selected in the editor. Maybe I should change the expression evaluation to be lazy, so it only runs when the user explicitly requests to show the value as an image. I'll keep it in mind for the next release.
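As a rough illustration of that lazy-evaluation idea (a hypothetical sketch; none of these names are the plugin's actual API):

```python
# Hypothetical sketch of lazy evaluation: selecting a variable only records what
# to evaluate; the expensive debugger evaluation runs when the user explicitly
# asks to view it as an image.
class LazyImagePreview:
    def __init__(self, evaluate):
        self._evaluate = evaluate   # zero-arg callable doing the slow work
        self._result = None
        self._done = False

    def on_selection(self):
        # Called when the variable is merely selected in the editor: do nothing.
        pass

    def on_view_requested(self):
        # Called only when the user explicitly clicks "view as image".
        if not self._done:
            self._result = self._evaluate()
            self._done = True
        return self._result

preview = LazyImagePreview(lambda: "expensive image info")
preview.on_selection()               # cheap
print(preview.on_view_requested())   # the slow evaluation happens here, once
```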
Thanks! I hope it can be fixed soon.
```
Evaluating: exec("""
try: _python_view_image_mod
except:
    from types import ModuleType
    _python_view_image_mod = ModuleType('python_view_image_mod', '')
    exec('''def keyvalue(pair):
    key, value = pair
    return f"{stringify(key)}: {stringify(value)}"

def sanitize(s):
    # s.replace("'", "\'").replace('"', '\"')
    return s.replace("'", "").replace('"', '')

def stringify(value):
    if isinstance(value, list):
        return f"[{','.join(map(stringify, value))}]"
    elif isinstance(value, tuple):
        return f"({','.join(map(stringify, value))})"
    elif isinstance(value, dict):
        return f"{{{ ','.join(map(keyvalue, value.items())) }}}"
    elif isinstance(value, str):
        if value.startswith("Value(") or value.startswith("Error("):
            return value  # keep Value/Error without wrapping quotes
        else:
            return f"'{value}'"
    elif isinstance(value, Exception):
        return f'"{type(value).__name__}: {sanitize(str(value))}"'
    else:
        return str(value)

def eval_into_value(func):
    try:
        return f"Value({stringify(func())})"
    except Exception as e:
        return f"Error({stringify(e)})"

def same_value_multiple_callables(get_value, funcs):
    try:
        val = get_value()
        return [eval_into_value(lambda: f(val)) for f in funcs]
    except Exception as e:
        return [f"Error({stringify(e)})"] * len(funcs)

def object_shape_if_it_has_one(obj):
    if hasattr(obj, "shape"):
        shape = obj.shape
        return tuple(shape)
    elif hasattr(obj, "width") and hasattr(obj, "height") and hasattr(obj, "getbands"):
        bands = "".join(map(str, obj.getbands()))
        return {"width": obj.width, "height": obj.height, "channels": bands}
    else:
        return None
''', _python_view_image_mod.__dict__)
exec('''
try: (is_numpy_image, numpy_image_info, numpy_image_save)
except:
    try:
        try:
            import numpy as np
def numpy():
def is_numpy_image(img, restrict_types):
if restrict_types:
try:
return isinstance(img, np.ndarray)
except TypeError:
return False
else:
try:
img = np.asarray(img)
is_image = (img.ndim == 2) or (
img.ndim == 3 and img.shape[2] in (1, 3, 4)
)
return is_image
except:
return False
def info(img):
obj_type = type(img).__name__
try:
img = np.asarray(img)
shape = str(img.shape)
dtype = str(img.dtype)
return {"type": obj_type, "shape": shape, "dtype": dtype}
except:
return {"type": obj_type}
#
# Save code
#
def standalone_imsave(path, img):
def preprocess_for_png(img):
assert img.ndim >= 2 and img.ndim <= 3
has_alpha = img.ndim == 3 and img.shape[2] == 4
is_grayscale = img.ndim == 2 or (img.shape[2] == 1)
while img.ndim < 3:
img = img[..., None]
if is_grayscale:
img = np.concatenate((img, img, img), axis=2)
if not has_alpha:
mask = np.full((*img.shape[:2], 1), 0xFF)
img = np.concatenate((img, mask), axis=2)
return img.astype(np.ubyte)
# https://gist.github.com/campagnola/0fb74586d38ea1a86e99
def make_png(data):
import numpy as np
import zlib
import struct
assert data.dtype == np.ubyte
assert data.ndim == 3
assert data.shape[-1] == 4
# www.libpng.org/pub/png/spec/1.2/PNG-Structure.html
header = bytes.fromhex("89504e470d0a1a0a") # header
def mkchunk(data, name):
if isinstance(data, np.ndarray):
size = data.nbytes
else:
size = len(data)
chunk = np.empty(size + 12, dtype=np.ubyte)
chunk.data[0:4] = struct.pack("!I", size)
chunk.data[4:8] = name # b'CPXS' # critical, public, standard, safe
chunk.data[8 : 8 + size] = data
chunk.data[-4:] = struct.pack("!I", zlib.crc32(chunk[4:-4]))
return chunk
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
ctyp = 0b0110 # alpha, color
h, w = data.shape[:2]
depth = data.itemsize * 8
ihdr = struct.pack("!IIBBBBB", w, h, depth, ctyp, 0, 0, 0)
c1 = mkchunk(ihdr, b"IHDR")
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IDAT
idat = np.empty(
(h, w * 4 + 1), dtype=np.ubyte
) # insert filter byte at each scanline
idat[:, 1:] = data.reshape(h, w * 4)
idat[:, 0] = 0
c2 = mkchunk(zlib.compress(idat), b"IDAT")
c3 = mkchunk(np.empty((0,), dtype=np.ubyte), b"IEND")
# concatenate
lh = len(header)
png = np.empty(lh + c1.nbytes + c2.nbytes + c3.nbytes, dtype=np.ubyte)
png.data[:lh] = header
p = lh
for chunk in (c1, c2, c3):
png[p : p + len(chunk)] = chunk
p += chunk.nbytes
return png
with open(path, "wb") as fp:
fp.write(make_png(preprocess_for_png(img)))
def prepare_image(img, preprocess_method="normalize"):
img = np.asarray(img)
if img.dtype in (bool, np.bool_):
img = img.astype(np.uint8)
if preprocess_method == "skimage.img_as_ubyte":
try:
from skimage import img_as_ubyte
return img_as_ubyte(img)
except:
return img
elif preprocess_method == "normalize":
img = img - img.min()
img = img / img.max()
img = img * 255
img = img.astype(np.uint8)
return img
else:
return img
def opencv_imsave(path, img):
import cv2
cv2.imwrite(path, img)
def imageio_imsave(path, img):
import imageio
imageio.imwrite(path, img)
def pillow_imsave(path, img):
from PIL import Image
img = Image.fromarray(img)
img.save(path)
options = {
"opencv": ("cv2", opencv_imsave),
"imageio": ("imageio", imageio_imsave),
"Pillow": ("PIL", pillow_imsave),
"Standalone": ("numpy", standalone_imsave),
}
def try_import(package):
import importlib
try:
return importlib.import_module(package)
except ImportError:
return None
def get_function(preferred=None):
save_function = None
if preferred is not None and preferred in options:
module_test, function = options[preferred]
module = try_import(module_test)
if module:
return function
for name, (module_test, function) in options.items():
module = try_import(module_test)
if module:
save_function = function
break
return save_function
def save(path, img, backend, preprocess):
func = get_function(backend)
img = prepare_image(img, preprocess)
func(path, img)
return is_numpy_image, info, save
is_numpy_image, numpy_image_info, numpy_image_save = numpy()
except:
pass
except:
pass
''', _python_view_image_mod.__dict__)
exec('''
try: (is_pillow_image, pillow_image_info, pillow_image_save)
except:
    try:
        try:
            import numpy as np
            import PIL
            import PIL.Image
def pillow():
def is_pillow_image(img):
try:
return isinstance(img, PIL.Image.Image)
except TypeError:
return False
def info(img):
obj_type = type(img).__name__
try:
img = np.asarray(img)
shape = str(img.shape)
dtype = str(img.dtype)
return {"type": obj_type, "shape": shape, "dtype": dtype}
except:
return {"type": obj_type}
def save(path, img, *args, **kwargs):
from PIL import Image
img.save(path)
return is_pillow_image, info, save
is_pillow_image, pillow_image_info, pillow_image_save = pillow()
except:
pass
except:
pass
''', _python_view_image_mod.__dict__)
exec('''
try: is_plotly_figure, plotly_figure_info, plotly_figure_save
except:
    try:
        try:
            from plotly.basedatatypes import BaseFigure
def plotly_figure():
def is_plotly_figure(obj):
try:
return isinstance(obj, BaseFigure)
except TypeError:
return False
def info(obj):
obj_type = type(obj).__name__
return {"type": obj_type}
def save(path, fig, *args, **kwargs):
fig.write_image(path)
return is_plotly_figure, info, save
is_plotly_figure, plotly_figure_info, plotly_figure_save = plotly_figure()
except:
pass
except:
pass
''', _python_view_image_mod.__dict__)
exec('''
try: is_pyplot_figure, pyplot_figure_info, pyplot_figure_save
except:
    try:
        try:
            import matplotlib.pyplot as plt
def set_matplotlib_agg(to_set):
if to_set:
import matplotlib
matplotlib.use('agg')
def pyplot_figure():
def is_pyplot_figure(obj):
try:
return isinstance(obj, plt.Figure)
except TypeError:
return False
def info(obj):
obj_type = type(obj).__name__
return {"type": obj_type}
def save(path, fig, tight=False, dpi=150, *args, **kwargs):
if fig is None:
fig = plt.gcf()
if tight:
fig.tight_layout()
fig.savefig(path, dpi=dpi)
return is_pyplot_figure, info, save
is_pyplot_figure, pyplot_figure_info, pyplot_figure_save = pyplot_figure()
def pyplot_axes():
def is_pyplot_ax(obj):
try:
return isinstance(obj, plt.Axes)
except TypeError:
return False
def info(obj):
obj_type = type(obj).__name__
return {"type": obj_type}
def save(path, ax, tight=False, dpi=150, *args, **kwargs):
from matplotlib.transforms import Bbox
fig = ax.figure
if tight:
fig.tight_layout()
renderer = fig.canvas.get_renderer()
items = []
items += ax.get_xticklabels() + ax.get_yticklabels()
items += [ax, ax.title]
bbox = Bbox.union(
[item.get_window_extent(renderer=renderer) for item in items]
)
extent = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path, bbox_inches=extent, dpi=dpi)
return is_pyplot_ax, info, save
is_pyplot_ax, pyplot_ax_info, pyplot_ax_save = pyplot_axes()
except:
pass
set_matplotlib_agg(True)
except:
pass
''', _python_view_image_mod.__dict__)
exec('''
try: is_pyplot_ax, pyplot_ax_info, pyplot_ax_save
except:
    try:
        try:
            import matplotlib.pyplot as plt
def set_matplotlib_agg(to_set):
if to_set:
import matplotlib
matplotlib.use('agg')
def pyplot_figure():
def is_pyplot_figure(obj):
try:
return isinstance(obj, plt.Figure)
except TypeError:
return False
def info(obj):
obj_type = type(obj).__name__
return {"type": obj_type}
def save(path, fig, tight=False, dpi=150, *args, **kwargs):
if fig is None:
fig = plt.gcf()
if tight:
fig.tight_layout()
fig.savefig(path, dpi=dpi)
return is_pyplot_figure, info, save
is_pyplot_figure, pyplot_figure_info, pyplot_figure_save = pyplot_figure()
def pyplot_axes():
def is_pyplot_ax(obj):
try:
return isinstance(obj, plt.Axes)
except TypeError:
return False
def info(obj):
obj_type = type(obj).__name__
return {"type": obj_type}
def save(path, ax, tight=False, dpi=150, *args, **kwargs):
from matplotlib.transforms import Bbox
fig = ax.figure
if tight:
fig.tight_layout()
renderer = fig.canvas.get_renderer()
items = []
items += ax.get_xticklabels() + ax.get_yticklabels()
items += [ax, ax.title]
bbox = Bbox.union(
[item.get_window_extent(renderer=renderer) for item in items]
)
extent = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path, bbox_inches=extent, dpi=dpi)
return is_pyplot_ax, info, save
is_pyplot_ax, pyplot_ax_info, pyplot_ax_save = pyplot_axes()
except:
pass
set_matplotlib_agg(True)
except:
pass
''', _python_view_image_mod.__dict__)
exec('''
try: is_numpy_tensor, numpy_tensor_info, numpy_tensor_save
except:
    try:
        try:
            import numpy as np
            import skimage.util
            import skimage.io
            from skimage import img_as_ubyte
def numpy_tensor():
def is_numpy_tensor(obj):
valid_channels = (1, 3, 4)
try:
is_valid = isinstance(obj, np.ndarray)
is_valid &= len(obj.shape) in (3, 4)
if len(obj.shape) == 3:
pass
elif len(obj.shape) == 4:
is_valid &= obj.shape[3] in valid_channels
return is_valid
except TypeError:
return False
def info(obj):
obj_type = type(obj).__name__
try:
obj = np.asarray(obj)
shape = str(obj.shape)
dtype = str(obj.dtype)
return {"type": obj_type, "shape": shape, "dtype": dtype}
except:
return {"type": obj_type}
def save(path, obj, normalize=True, pad=10, *args, **kwargs):
is_color = obj.ndim == 4
if is_color:
pad_value = (1.0,) * obj.shape[-1]
else:
pad_value = 1.0
montage = skimage.util.montage(
obj.copy(), # avoid modifying the input object
fill=pad_value,
rescale_intensity=normalize,
padding_width=pad,
multichannel=is_color,
)
skimage.io.imsave(path, img_as_ubyte(montage), check_contrast=False)
return is_numpy_tensor, info, save
is_numpy_tensor, numpy_tensor_info, numpy_tensor_save = numpy_tensor()
except:
pass
except:
pass
''', _python_view_image_mod.__dict__)
exec('''
try: is_torch_tensor, torch_tensor_info, torch_tensor_save
except:
    try:
        try:
            import torch
def torch_tensor():
def is_torch_tensor(obj):
valid_channels = (1, 2, 3, 4)
try:
is_valid = isinstance(obj, torch.Tensor)
is_valid &= len(obj.shape) in (2, 3, 4)
if len(obj.shape) == 2:
pass
elif len(obj.shape) == 3:
is_valid &= obj.shape[0] in valid_channels
elif len(obj.shape) == 4:
is_valid &= obj.shape[1] in valid_channels
return is_valid
except:
return False
def info(obj):
obj_type = type(obj).__name__
try:
shape = str(tuple(obj.shape))
dtype = str(obj.dtype)
return {"type": obj_type, "shape": shape, "dtype": dtype}
except:
return {"type": obj_type}
def save(path, obj, normalize=True, pad=10, *args, **kwargs):
import torchvision
pad_value = 255
torchvision.utils.save_image(
obj.float(),
path,
normalize=normalize,
pad_value=pad_value,
padding=pad,
)
return is_torch_tensor, info, save
is_torch_tensor, torch_tensor_info, torch_tensor_save = torch_tensor()
except:
pass
except:
pass
''', _python_view_image_mod.__dict__)
""" ) did not finish after 3.00 seconds. `
A new message showed up 😂
This is the code I'm executing. In most cases it should not take long to run, but apparently there are cases where it does. My first guess would be importing torch, but it could be something else. You can try executing it yourself to see what causes the long execution. I'll consider making it smarter (e.g. lazy importing), but first I have to figure out what causes this.
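As an illustration of the lazy-importing idea (a sketch only, not the extension's actual code; the helper names here are made up): the expensive import happens only when an object actually looks like it could be a tensor, and a module the debugged program has already imported is just a `sys.modules` lookup.

```python
import importlib
import sys

def lazy_import(name):
    # Heavy modules (torch, matplotlib, ...) are imported only when a checker
    # actually needs them, instead of eagerly on every debugger evaluation.
    if name in sys.modules:
        # Already imported by the debugged program: just a dict lookup, cheap.
        return sys.modules[name]
    try:
        # First import: this is the potentially slow part.
        return importlib.import_module(name)
    except ImportError:
        return None

def is_torch_tensor(obj):
    # Cheap pre-check: don't touch torch at all if the object clearly
    # isn't a torch type.
    if not type(obj).__module__.startswith("torch"):
        return False
    torch = lazy_import("torch")
    return torch is not None and isinstance(obj, torch.Tensor)
```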
I found that this problem appears when debugging multiple threads, especially when debugging a PyTorch DataLoader.
Thanks for checking it out! If you have a minimal example with which I can reproduce it, that would be helpful. In any case, I'll try to reproduce it myself.
BTW, I published a new (pre-release) version in which I modified some of the relevant code. I didn't try to fix any performance issues directly, but maybe some of the fixes will help nonetheless.
I have another idea that might help, and it is easy to try out. Add a breakpoint at the beginning of the script, somewhere in the global scope (e.g. right after the imports). Then continue debugging as before. What I'm trying to achieve is to run the extension's "setup" in the global scope so it won't need to run again in the local scope. If it helps, please let me know and I can adjust my code to do it by default.
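For example, in a typical training script the breakpoint for this workaround would go right after the imports, in the global scope (a hypothetical script, just to show the placement; assumes PyTorch is installed):

```python
import torch
from torch.utils.data import DataLoader

# <-- Put a breakpoint on the line below and let the debugger stop here once.
#     The extension's setup then runs in the global scope, so later stops inside
#     the DataLoader / training loop don't have to repeat it in a local scope.
BATCH_SIZE = 16

def main():
    # build dataset, DataLoader, and training loop here
    ...

if __name__ == "__main__":
    main()
```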
OK, in which version can I try the new feature?
v3.0.17 is the latest. You have to click "switch to pre-release" on the extension page in VSCode.
it doesn't work
The problem disappeared when I enabled restrictImageTypes, but that way I cannot view tensors.
Ok. This is very helpful. I think you caught a bug! restrictImageTypes ensures that we don't try to convert arbitrary values to numpy (which helps with performance...). But it does not mean that tensors should be unavailable (so the bug is that they're unavailable!).
I'll try to fix it soon.
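This matches the `is_numpy_image(img, restrict_types)` helper in the evaluated code above: with the restriction on, only a cheap `isinstance` check runs; with it off, arbitrary objects get pushed through `np.asarray`, which can be expensive. A simplified sketch of that difference (not the exact extension code):

```python
import numpy as np

def is_numpy_image(img, restrict_types):
    if restrict_types:
        # Restricted mode: a cheap isinstance check only; arbitrary objects
        # (lists, custom containers, ...) are never converted.
        return isinstance(img, np.ndarray)
    # Unrestricted mode: try to coerce anything into an array first.
    # np.asarray on a large or exotic object is where the time can go.
    try:
        arr = np.asarray(img)
        return arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] in (1, 3, 4))
    except Exception:
        return False
```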
👍 Hope you can fix it soon.
Can you share a bit more information? Mainly, what is the datatype and shape of the tensor (mostly I need to know whether it's channels-first or channels-last), and whether you are trying to show a batch or a single image. Thanks!
type = torch.Tensor
dtype = torch.float32
shape = (16, 3, 96, 96)
I use that in the debug panel.
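For reference, the `torch_tensor` save path in the evaluated code above hands such a channels-first batch to `torchvision.utils.save_image`; a standalone sketch of roughly the same call (assuming torchvision is installed, with a random batch standing in for the real one):

```python
import torch
import torchvision

# A batch shaped like the one reported: 16 images, 3 channels, 96x96, channels-first.
batch = torch.rand(16, 3, 96, 96, dtype=torch.float32)

# Save the whole batch as one image grid; normalize rescales values to [0, 1],
# and padding/pad_value draw separators between the 16 tiles.
torchvision.utils.save_image(batch.float(), "batch.png",
                             normalize=True, padding=10, pad_value=1.0)
```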
@elazarcoh I also see this bug happening when debugging torch train loops, but specifically I'm in a stack frame (function) that doesn't even contain tensors (only 2 np arrays of size 10k×600). Maybe this happens even if tensors and a DataLoader exist in other frames?
I don't know what the cause might be; it might be a bit hard to debug.
Can it be replicated with a minimal example, or does it happen only in more complex cases?
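A minimal script in the spirit of the reports above might look like the following; this is only a guess at the trigger (a breakpoint inside a multi-worker DataLoader), not a confirmed reproduction:

```python
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    def __len__(self):
        return 64

    def __getitem__(self, idx):
        # Set a breakpoint here and select/inspect `img` in the debugger
        # while the loader below iterates with num_workers > 0.
        img = np.random.rand(3, 96, 96).astype(np.float32)
        return torch.from_numpy(img), idx

if __name__ == "__main__":
    loader = DataLoader(ToyDataset(), batch_size=16, num_workers=2)
    for batch, idx in loader:
        pass
```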