
Significant performance improvements for complex scalars

Open · fblanchetNaN opened this issue 3 years ago · 1 comment

Following https://github.com/QCoDeS/Qcodes/pull/4446

To make this more explicit, I wrote two scripts corresponding to the current situation (master) and to my PR:

master

import io
import timeit

import numpy as np


def _adapt_complex(value):
    # Wrap the scalar in a 1-element array and serialize it in the full
    # .npy container format, header included.
    out = io.BytesIO()
    np.save(out, np.array([value]))
    out.seek(0)
    return out.read()


def _convert_complex(text):
    # Parse the whole .npy blob back and pull out the single element.
    out = io.BytesIO(text)
    out.seek(0)
    return np.load(out)[0]


value = np.complex128()
print(f"----- Input to adapt : size {np.dtype(value).itemsize} bytes -----")
print(value)
repetition = 1000000
print(
    f"-- Adapt time : {timeit.timeit('_adapt_complex(value)', globals=globals(), number=repetition)*1e6 / repetition:.4g} us --"
)
text = _adapt_complex(value)
print(
    f"----- Resulting data : size {len(text)} bytes, overhead {100*(len(text)/ np.dtype(value).itemsize - 1):.2f} % -----"
)
print(text)
repetition = 100000
print(
    f"-- Convert time : {timeit.timeit('_convert_complex(text)', globals=globals(), number=repetition)*1e6 / repetition:.4g} us --"
)

On my computer with python -OO, it gives:

----- Input to adapt : size 16 bytes -----
0j
-- Adapt time : 11.96 us --
----- Resulting data : size 144 bytes, overhead 800.00 % -----
b"\x93NUMPY\x01\x00v\x00{'descr': '<c16', 'fortran_order': False, 'shape': (1,), }                                                           \n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
-- Convert time : 126.3 us --
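
Almost all of that 800 % overhead is the .npy container itself: the magic string, the format version, and a header dict padded to a 64-byte boundary, which together account for 128 of the 144 bytes. A minimal sketch of the breakdown, assuming the documented .npy version 1.0 layout and a NumPy version that, like the one above, pads the header to 64-byte alignment:

import io

import numpy as np

buf = io.BytesIO()
np.save(buf, np.array([np.complex128()]))
raw = buf.getvalue()

magic = raw[:6]                                   # b'\x93NUMPY'
version = (raw[6], raw[7])                        # (1, 0)
header_len = int.from_bytes(raw[8:10], "little")  # 118-byte padded header dict
payload = raw[10 + header_len:]                   # the 16 bytes of actual data

print(len(raw), header_len, len(payload))         # 144 118 16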

improvement

import timeit

import numpy as np


def _adapt_complex(value):
    # Serialize only the raw 8 or 16 bytes of the scalar, with no
    # container format around them.
    return (
        value if isinstance(value, np.complexfloating) else np.complex_(value)
    ).tobytes()


numpy_concrete_complex = (np.complex64, np.complex128)
numpy_complex_map_size2type = {np.dtype(t).itemsize: t for t in numpy_concrete_complex}


def _convert_complex(text):
    try:
        # The modulo reduces a blob carrying a 64-byte-aligned .npy header
        # (the legacy format above) to the size of its trailing raw value,
        # and leaves bare 8- or 16-byte blobs unchanged.
        value_size = len(text) % 64
        return np.frombuffer(
            text[-value_size:], dtype=numpy_complex_map_size2type[value_size]
        ).item()
    except KeyError as exc:
        raise ValueError(f"Cannot parse {str(text)}") from exc


value = np.complex128()
print(f"----- Input to adapt : size {np.dtype(value).itemsize} bytes -----")
print(value)
repetition = 10000000
print(
    f"-- Adapt time : {timeit.timeit('_adapt_complex(value)', globals=globals(), number=repetition)*1e6 / repetition:.4g} us --"
)
text = _adapt_complex(value)
print(
    f"----- Resulting data : size {len(text)} bytes, overhead {100*(len(text)/ np.dtype(value).itemsize - 1):.2f} % -----"
)
print(text)
repetition = 10000000
print(
    f"-- Convert time : {timeit.timeit('_convert_complex(text)', globals=globals(), number=repetition)*1e6 / repetition:.4g} us --"
)

On my computer with python -OO, it gives:

----- Input to adapt : size 16 bytes -----
0j
-- Adapt time : 0.5586 us --
----- Resulting data : size 16 bytes, overhead 0.00 % -----
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-- Convert time : 0.7448 us --
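
One side effect of the len(text) % 64 line seems worth spelling out: it lets the new converter also read blobs written by the old npy-based adapter, because a 144-byte legacy blob reduces to its trailing 16 raw bytes (144 % 64 == 16). A quick sketch, reusing _convert_complex from the improvement script above and assuming the little-endian '<c16' layout and 144-byte blob size shown in the master output:

import io

import numpy as np

# Legacy blob: full .npy container, as written by the master adapter.
legacy = io.BytesIO()
np.save(legacy, np.array([np.complex128(1 + 2j)]))
legacy_blob = legacy.getvalue()              # 144 bytes; 144 % 64 == 16

# New blob: raw scalar bytes, as written by the PR adapter.
new_blob = np.complex128(1 + 2j).tobytes()   # 16 bytes; 16 % 64 == 16

# Both reduce to the same trailing 16 bytes, so one converter reads both.
assert _convert_complex(legacy_blob) == (1 + 2j)
assert _convert_complex(new_blob) == (1 + 2j)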

Both for time and memory, these look like major improvements: adapting is roughly 20x faster, converting roughly 170x faster, and the 800 % storage overhead disappears entirely.
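
For context, functions named _adapt_complex/_convert_complex plug into Python's sqlite3 adapter/converter hooks, which is how QCoDeS moves such values in and out of its SQLite-backed datasets. A minimal sketch of the wiring, where the declared column type "complex" and the in-memory database are illustrative assumptions rather than what QCoDeS actually declares:

import sqlite3

import numpy as np

# Adapters turn values into bytes on INSERT; the converter turns them back
# on SELECT. _adapt_complex/_convert_complex are the PR versions above.
sqlite3.register_adapter(complex, _adapt_complex)
sqlite3.register_adapter(np.complex128, _adapt_complex)
sqlite3.register_converter("complex", _convert_complex)

conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute("CREATE TABLE t (val complex)")
conn.execute("INSERT INTO t VALUES (?)", (1 + 2j,))
print(conn.execute("SELECT val FROM t").fetchone()[0])  # (1+2j)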

fblanchetNaN · Sep 21 '22 12:09

Codecov Report

Merging #4642 (82b2743) into master (02d2bd5) will decrease coverage by 0.01%. The diff coverage is 80.00%.

@@            Coverage Diff             @@
##           master    #4642      +/-   ##
==========================================
- Coverage   68.24%   68.24%   -0.01%     
==========================================
  Files         339      339              
  Lines       31782    31781       -1     
==========================================
- Hits        21689    21688       -1     
  Misses      10093    10093              

codecov[bot] · Sep 21 '22 12:09