aboutsummaryrefslogtreecommitdiff
path: root/qolab
diff options
context:
space:
mode:
authorEugeniy E. Mikhailov <evgmik@gmail.com>2024-10-11 12:13:27 -0400
committerEugeniy E. Mikhailov <evgmik@gmail.com>2024-10-11 12:15:06 -0400
commit36aaf315c48238e9d9b2e0924594506dba105c24 (patch)
tree888b1065be844c4851fdc2cee125584a98488a9d /qolab
parent0fc099f0bdbdebae162b16d00daa81b9bf7a2299 (diff)
downloadqolab-36aaf315c48238e9d9b2e0924594506dba105c24.tar.gz
qolab-36aaf315c48238e9d9b2e0924594506dba105c24.zip
draft of getRawWaveform for SDS800XHD
Diffstat (limited to 'qolab')
-rw-r--r--qolab/hardware/scope/sds800xhd.py125
1 files changed, 123 insertions, 2 deletions
diff --git a/qolab/hardware/scope/sds800xhd.py b/qolab/hardware/scope/sds800xhd.py
index 561f308..436254f 100644
--- a/qolab/hardware/scope/sds800xhd.py
+++ b/qolab/hardware/scope/sds800xhd.py
@@ -24,12 +24,11 @@ class SDS800XHD(SDS1104X):
super().__init__(resource, *args, **kwds)
self.config["Device model"] = "SDS800XHD"
self.resource.read_termination = "\n"
- self.resource.timeout=1000
+ self.resource.timeout = 1000
self.numberOfChannels = 4
self.maxRequiredPoints = 1000
# desired number of points per channel, can return twice more
-
@BasicInstrument.tsdb_append
def getTimePerDiv(self):
qstr = "TDIV?"
@@ -46,6 +45,128 @@ class SDS800XHD(SDS1104X):
)
return float(numberString)
+ def getRawWaveform(
+ self, chNum, availableNpnts=None, maxRequiredPoints=None, decimate=True
+ ):
+ """
+ Get raw channel waveform in binary format.
+
+ Parameters
+ ----------
+ chNum : int
+ Scope channel to use: 1, 2, 3, or 4
+ availableNpnts : int or None (default)
+ Available number of points. Do not set it if you want it auto detected.
+ maxRequiredPoints : int
+        Maximum number of required points; if we ask for fewer than available
+        we will get a sparse set which proportionally fills all available time range.
+ decimate : False or True (default)
+        Decimate should be read as apply the low pass filter or not; technically
+        for both settings we get decimation (i.e. fewer points than are
+        available at the scope). The name came from
+ ``scipy.signal.decimate`` filtering function.
+ If ``decimate=True`` is used, we get all available points
+        and then low-pass filter them to get ``maxRequiredPoints``.
+        The result is then less noisy, but the transfer time from the instrument
+        is longer.
+        If ``decimate=False``, then we are skipping points to get the needed number,
+        but we might see aliasing if there is high frequency noise
+        and sparsing > 1. Unless you know what you are doing, it is recommended
+ to use ``decimate=True``.
+ """
+
+ rawChanCfg = {}
+ # switching to binary data transfer
+ self.write(":WAVeform:WIDTh WORD") # two bytes per data point
+ rawChanCfg["WaveformWidth"] = "WORD"
+ self.write(":WAVeform:BYTeorder LSB")
+ rawChanCfg["WaveformByteorder"] = "LSB"
+
+ if availableNpnts is None:
+ # using channel 1 to get availableNpnts
+ availableNpnts = self.getAvailableNumberOfPoints(1)
+ rawChanCfg["availableNpnts"] = availableNpnts
+
+ if maxRequiredPoints is None:
+ maxRequiredPoints = self.maxRequiredPoints
+ (
+ sparsing,
+ Npnts,
+ availableNpnts,
+ maxRequiredPoints,
+ ) = calcSparsingAndNumPoints(availableNpnts, maxRequiredPoints)
+ rawChanCfg["Npnts"] = Npnts
+ rawChanCfg["sparsing"] = sparsing
+ if decimate:
+ Npnts = availableNpnts # get all of them and decimate later
+ if (sparsing == 1 and Npnts == availableNpnts) or decimate:
+ # We are getting all points of the trace
+ self.write(":WAVeform:STARt 0") # start point to read from the scope memory
+ self.write(f":WAVeform:MAXPoint {availableNpnts}") # last point to read
+ self.write(":WAVeform:INTerval 1") # sparsing of 1, i.e. read every point
+ self.write(f":WAVeform:POINt {availableNpnts}") # transfer all points
+ else:
+ # we just ask every point with 'sparsing' interval
+ # fast to grab but we could do better with more advance decimate
+ # method, which allow better precision for the price
+ # of longer acquisition time
+ self.write(":WAVeform:STARt 0") # start point to read from the scope memory
+ self.write(f":WAVeform:MAXPoint {availableNpnts}") # last point to read
+ self.write(f":WAVeform:INTerval {sparsing}") # interval between points
+ # Note: it is not enough to provide sparsing
+ # number of requested points needed to be asked too.
+ # However this scope is smart enough to truncate the output to
+ # physically available points, if you request more no harm is done.
+ self.write(f":WAVeform:POINt {Npnts}") # transfer all points
+
+ trRaw = Trace(f"Ch{chNum}")
+        self.write(f":WAVeform:SOURce C{chNum}")
+ qstr = f":WAVeform:DATA?"
+ if self.resource.interface_type == InterfaceType.usb:
+ # Setting chunk size to 496 bytes, it seems that SDS sends data
+ # in 512 bytes chunks via USB.
+ # Which is 8 packets of 64 bytes, but each packet takes 2 bytes for a header.
+ # Thus useful payload is 512-8*2 = 496
+ # see https://patchwork.ozlabs.org/project/qemu-devel/patch/20200317095049.28486-4-kraxel@redhat.com/
+ # Setting chunk_size for a large number has *catastrophic* results
+ # on data transfer rate, since we wait for more data
+ # which is not going to come until timeout expires
+ # Setting it low is not as bad but still slows down the transfer.
+ # NOTE: I am not sure if it is a Linux driver issue or more global.
+ # The transfer rate is about
+ # 5550 kB/S, for 10k points
+ # 1400 kB/S, for 50k points
+ # 1000 kB/S, for 100k points
+ # 500 kB/S, for 500k points
+ # 160 kB/S, for 1000k points
+ # 55 kB/S, for 2.5M points
+ # It is about factor of 2 slower (for 100k points),
+ # if the scope is in the Run mode, i.e. not Stopped.
+ # FIXME find why speed depends on number of points.
+ wfRaw = self.query_binary_values(
+ qstr,
+ datatype="h",
+ header_fmt="ieee",
+ container=np.array,
+ chunk_size=496,
+ )
+ else:
+ wfRaw = self.query_binary_values(
+ qstr, datatype="h", header_fmt="ieee", container=np.array
+ )
+ trRaw.values = wfRaw.reshape(wfRaw.size, 1)
+ if decimate and sparsing != 1:
+ numtaps = 3
+ # not sure it is the best case
+ trRaw.values = scipy.signal.decimate(
+ trRaw.values, sparsing, numtaps, axis=0
+ )
+
+ trRaw.config["unit"] = "Count"
+ trRaw.config["tags"]["Decimate"] = decimate
+ trRaw.config["tags"]["rawChanConfig"] = rawChanCfg
+ return trRaw
+
if __name__ == "__main__":
import pyvisa