diff --git a/pyproject.toml b/pyproject.toml
index fcd9474fda63..7e193455dbab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -205,6 +205,24 @@
 reportUnusedClass = "none"
 reportUnnecessaryCast = "none" # mypy already checks this. If it fails for pyright its because mypy requires it
 reportUnnecessaryContains = "none"
+[tool.ty]
+
+[tool.ty.rules]
+
+# we catch these via pyright or mypy so ignore here
+# deprecated: we trigger deprecated when deprecated methods are used inside other deprecated methods.
+# unresolved-import: we have a lot of imports that are only available with various libraries for specific instrument drivers
+# unused-ignore-comment: mypy already checks for unused ignores, so if they are unused by ty it's because mypy requires them
+unresolved-import = "ignore"
+deprecated = "ignore"
+unused-type-ignore-comment = "ignore"
+
+[[tool.ty.overrides]]
+include = ["src/qcodes/instrument_drivers/Harvard/Decadac.py"]
+
+[tool.ty.overrides.rules]
+unresolved-attribute = "ignore"
+
 [tool.pytest.ini_options]
 minversion = "7.2"
 testpaths = "tests"
diff --git a/src/qcodes/dataset/dond/do_nd.py b/src/qcodes/dataset/dond/do_nd.py
index c3b6a304f178..d40d27a11364 100644
--- a/src/qcodes/dataset/dond/do_nd.py
+++ b/src/qcodes/dataset/dond/do_nd.py
@@ -23,6 +23,7 @@
     _set_write_period,
     catch_interrupts,
 )
+from qcodes.dataset.experiment_container import Experiment
 from qcodes.dataset.measurements import Measurement
 from qcodes.dataset.threading import (
     SequentialParamsCaller,
@@ -42,7 +43,7 @@
         MultiAxesTupleListWithDataSet,
         ParamMeasT,
     )
-    from qcodes.dataset.experiment_container import Experiment
+

 LOG = logging.getLogger(__name__)
 SweepVarType = Any
@@ -400,8 +401,12 @@ def _get_experiments(
         experiments_internal: Sequence[Experiment | None] = [
             experiments
         ] * n_experiments_required
-    else:
+    elif not isinstance(experiments, Experiment):
         experiments_internal = experiments
+    else:
+        raise TypeError(
+            f"Invalid type for experiments: got {experiments} of type {type(experiments)}"
+        )

     if len(experiments_internal) != n_experiments_required:
         raise ValueError(
diff --git a/src/qcodes/extensions/infer.py b/src/qcodes/extensions/infer.py
index d22b4060fd6c..2e596cec6f5f 100644
--- a/src/qcodes/extensions/infer.py
+++ b/src/qcodes/extensions/infer.py
@@ -264,8 +264,10 @@ def get_parent_instruments_from_chain_of_type(

     param_chain = get_parameter_chain(parameter)
     return tuple(
+        # cast is required since mypy as of 1.19.1 cannot infer the type narrowing based
+        # on isinstance checks inside comprehensions
         [
-            cast("TInstrument", param.instrument)
+            cast("TInstrument", param.instrument)  # ty: ignore[redundant-cast]
             for param in param_chain
             if isinstance(param.instrument, instrument_type)
         ]
diff --git a/src/qcodes/instrument/channel.py b/src/qcodes/instrument/channel.py
index 3b774f1dc134..f6132a0a7364 100644
--- a/src/qcodes/instrument/channel.py
+++ b/src/qcodes/instrument/channel.py
@@ -198,42 +198,42 @@ def __init__(
         )

     @overload
-    def __getitem__(self, i: int) -> InstrumentModuleType: ...
+    def __getitem__(self, index: int) -> InstrumentModuleType: ...

     @overload
-    def __getitem__(self: Self, i: slice | tuple[int, ...]) -> Self: ...
+    def __getitem__(self: Self, index: slice | tuple[int, ...]) -> Self: ...

     def __getitem__(
-        self: Self, i: int | slice | tuple[int, ...]
+        self: Self, index: int | slice | tuple[int, ...]
     ) -> InstrumentModuleType | Self:
         """
         Return either a single channel, or a new :class:`ChannelTuple`
         containing only the specified channels

         Args:
-            i: Either a single channel index or a slice of channels
+            index: Either a single channel index or a slice of channels
                 to get

         """
-        if isinstance(i, slice):
+        if isinstance(index, slice):
             return type(self)(
                 self._parent,
                 self._name,
                 self._chan_type,
-                self._channels[i],
+                self._channels[index],
                 multichan_paramclass=self._paramclass,
                 snapshotable=self._snapshotable,
             )
-        elif isinstance(i, tuple):
+        elif isinstance(index, tuple):
             return type(self)(
                 self._parent,
                 self._name,
                 self._chan_type,
-                [self._channels[j] for j in i],
+                [self._channels[j] for j in index],
                 multichan_paramclass=self._paramclass,
                 snapshotable=self._snapshotable,
             )
-        return self._channels[i]
+        return self._channels[index]

     def __iter__(self) -> Iterator[InstrumentModuleType]:
         return iter(self._channels)
@@ -244,8 +244,8 @@ def __reversed__(self) -> Iterator[InstrumentModuleType]:
     def __len__(self) -> int:
         return len(self._channels)

-    def __contains__(self, item: object) -> bool:
-        return item in self._channels
+    def __contains__(self, value: object) -> bool:
+        return value in self._channels

     def __repr__(self) -> str:
         return (
@@ -315,11 +315,9 @@ def name_parts(self) -> list[str]:
         name_parts.append(self.short_name)
         return name_parts

-    # the parameter obj should be called value but that would
-    # be an incompatible change
-    def index(  # pyright: ignore[reportIncompatibleMethodOverride]
+    def index(
         self,
-        obj: InstrumentModuleType,
+        value: InstrumentModuleType,
         start: int = 0,
         stop: int = sys.maxsize,
     ) -> int:
@@ -327,23 +325,21 @@ def index(  # pyright: ignore[reportIncompatibleMethodOverride]
         Return the index of the given object

         Args:
-            obj: The object to find in the channel list.
+            value: The object to find in the channel list.
             start: Index to start searching from.
             stop: Index to stop searching at.

         """
-        return self._channels.index(obj, start, stop)
+        return self._channels.index(value, start, stop)

-    def count(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, obj: InstrumentModuleType
-    ) -> int:
+    def count(self, value: InstrumentModuleType) -> int:
         """Returns number of instances of the given object in the list

         Args:
-            obj: The object to find in the ChannelTuple.
+            value: The object to find in the ChannelTuple.

         """
-        return self._channels.count(obj)
+        return self._channels.count(value)

     def get_channels_by_name(self: Self, *names: str) -> Self:
         """
@@ -717,15 +713,15 @@ def __init__(
         self._locked = False

     @overload
-    def __delitem__(self, key: int) -> None: ...
+    def __delitem__(self, index: int) -> None: ...

     @overload
-    def __delitem__(self, key: slice) -> None: ...
+    def __delitem__(self, index: slice) -> None: ...

-    def __delitem__(self, key: int | slice) -> None:
+    def __delitem__(self, index: int | slice) -> None:
         if self._locked:
             raise AttributeError("Cannot delete from a locked channel list")
-        self._channels.__delitem__(key)
+        self._channels.__delitem__(index)
         self._channel_mapping = {
             channel.short_name: channel for channel in self._channels
         }
@@ -759,27 +755,25 @@ def __setitem__(
             channel.short_name: channel for channel in self._channels
         }

-    def append(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, obj: InstrumentModuleType
-    ) -> None:
+    def append(self, value: InstrumentModuleType) -> None:
         """
         Append a Channel to this list. Requires that the ChannelList is not
         locked and that the channel is of the same type as the ones in the list.
         Args:
-            obj: New channel to add to the list.
+            value: New channel to add to the list.

         """
         if self._locked:
             raise AttributeError("Cannot append to a locked channel list")

-        if not isinstance(obj, self._chan_type):
+        if not isinstance(value, self._chan_type):
             raise TypeError(
                 f"All items in a channel list must be of the same "
-                f"type. Adding {type(obj).__name__} to a "
+                f"type. Adding {type(value).__name__} to a "
                 f"list of {self._chan_type.__name__}."
             )
-        self._channel_mapping[obj.short_name] = obj
-        self._channels.append(obj)
+        self._channel_mapping[value.short_name] = value
+        self._channels.append(value)

     def clear(self) -> None:
         """
@@ -791,63 +785,59 @@ def clear(self) -> None:
         self._channels.clear()
         self._channel_mapping.clear()

-    def remove(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, obj: InstrumentModuleType
-    ) -> None:
+    def remove(self, value: InstrumentModuleType) -> None:
         """
-        Removes obj from ChannelList if not locked.
+        Removes value from ChannelList if not locked.

         Args:
-            obj: Channel to remove from the list.
+            value: Channel to remove from the list.

         """
         if self._locked:
             raise AttributeError("Cannot remove from a locked channel list")
         else:
-            self._channels.remove(obj)
-            self._channel_mapping.pop(obj.short_name)
+            self._channels.remove(value)
+            self._channel_mapping.pop(value.short_name)

-    def extend(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, objects: Iterable[InstrumentModuleType]
-    ) -> None:
+    def extend(self, values: Iterable[InstrumentModuleType]) -> None:
         """
-        Insert an iterable of objects into the list of channels.
+        Insert an iterable of InstrumentModules into the list of channels.

         Args:
-            objects: A list of objects to add into the
+            values: A list of InstrumentModules to add into the
                 :class:`ChannelList`.

         """
-        # objects may be a generator but we need to iterate over it twice
+        # values may be a generator but we need to iterate over it twice
         # below so copy it into a tuple just in case.
         if self._locked:
             raise AttributeError("Cannot extend a locked channel list")
-        objects_tuple = tuple(objects)
-        if not all(isinstance(obj, self._chan_type) for obj in objects_tuple):
+        values_tuple = tuple(values)
+        if not all(isinstance(value, self._chan_type) for value in values_tuple):
             raise TypeError("All items in a channel list must be of the same type.")
-        self._channels.extend(objects_tuple)
-        self._channel_mapping.update({obj.short_name: obj for obj in objects_tuple})
+        self._channels.extend(values_tuple)
+        self._channel_mapping.update(
+            {value.short_name: value for value in values_tuple}
+        )

-    def insert(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, index: int, obj: InstrumentModuleType
-    ) -> None:
+    def insert(self, index: int, value: InstrumentModuleType) -> None:
         """
         Insert an object into the ChannelList at a specific index.

         Args:
             index: Index to insert object.
-            obj: Object of type chan_type to insert.
+            value: Object of type chan_type to insert.

         """
         if self._locked:
             raise AttributeError("Cannot insert into a locked channel list")
-        if not isinstance(obj, self._chan_type):
+        if not isinstance(value, self._chan_type):
             raise TypeError(
                 f"All items in a channel list must be of the same "
-                f"type. Adding {type(obj).__name__} to a list of {self._chan_type.__name__}."
+                f"type. Adding {type(value).__name__} to a list of {self._chan_type.__name__}."
             )
-        self._channels.insert(index, obj)
-        self._channel_mapping[obj.short_name] = obj
+        self._channels.insert(index, value)
+        self._channel_mapping[value.short_name] = value

     def get_validator(self) -> ChannelTupleValidator:
         """
diff --git a/src/qcodes/instrument/mockers/ami430.py b/src/qcodes/instrument/mockers/ami430.py
index 329c3cbc616c..c02b1bbc68a0 100644
--- a/src/qcodes/instrument/mockers/ami430.py
+++ b/src/qcodes/instrument/mockers/ami430.py
@@ -163,7 +163,7 @@ def _handle_messages(self, msg):
         if callable(handler):
             # some of the callables in the dict does not take arguments.
             # ignore that warning for now since this is mock code only
-            rval = handler(args)  # pyright: ignore[reportCallIssue]
+            rval = handler(args)  # pyright: ignore[reportCallIssue]  # ty: ignore[too-many-positional-arguments]
         else:
             rval = handler
diff --git a/src/qcodes/instrument_drivers/AlazarTech/dll_wrapper.py b/src/qcodes/instrument_drivers/AlazarTech/dll_wrapper.py
index 15ef43685039..59a131825da3 100644
--- a/src/qcodes/instrument_drivers/AlazarTech/dll_wrapper.py
+++ b/src/qcodes/instrument_drivers/AlazarTech/dll_wrapper.py
@@ -62,6 +62,7 @@ def _mark_params_as_updated(*args: Any) -> None:
 def _check_error_code(
     return_code: int, func: Callable[..., Any], arguments: tuple[Any, ...]
 ) -> tuple[Any, ...]:
+    func_name: str = getattr(func, "__name__", "UnknownFunction")
     if return_code not in {API_SUCCESS, API_DMA_IN_PROGRESS}:
         argrepr = repr(arguments)
         if len(argrepr) > 100:
@@ -69,15 +70,15 @@ def _check_error_code(

         logger.error(
             f"Alazar API returned code {return_code} from function "
-            f"{func.__name__} with args {argrepr}"
+            f"{func_name} with args {argrepr}"
         )

         if return_code not in ERROR_CODES:
             raise RuntimeError(
-                f"unknown error {return_code} from function {func.__name__} with args: {argrepr}"
+                f"unknown error {return_code} from function {func_name} with args: {argrepr}"
             )
         raise RuntimeError(
-            f"error {return_code}: {ERROR_CODES[ReturnCode(return_code)]} from function {func.__name__} with args: {argrepr}"
+            f"error {return_code}: {ERROR_CODES[ReturnCode(return_code)]} from function {func_name} with args: {argrepr}"
         )

     return arguments
diff --git a/src/qcodes/instrument_drivers/tektronix/AWG5014.py b/src/qcodes/instrument_drivers/tektronix/AWG5014.py
index 2daf4cbd4392..3e058d146310 100644
--- a/src/qcodes/instrument_drivers/tektronix/AWG5014.py
+++ b/src/qcodes/instrument_drivers/tektronix/AWG5014.py
@@ -915,6 +915,9 @@ def get_sq_mode(self) -> str:
     def _pack_record(
         self, name: str, value: float | str | Sequence[Any] | npt.NDArray, dtype: str
     ) -> bytes:
+        def _pack_numpy_array(array: npt.NDArray) -> bytes:
+            return array.astype(" None:
     def __iter__(self) -> Iterator[P]:
         return iter(self._dict)

-    def __contains__(self, item: object) -> bool:
-        return item in self._dict
+    def __contains__(self, x: object) -> bool:
+        return x in self._dict

     def __len__(self) -> int:
         return len(self._dict)
diff --git a/src/qcodes/parameters/sequence_helpers.py b/src/qcodes/parameters/sequence_helpers.py
index 6cf30f249299..3ce2344c47f8 100644
--- a/src/qcodes/parameters/sequence_helpers.py
+++ b/src/qcodes/parameters/sequence_helpers.py
@@ -59,6 +59,14 @@ def is_sequence_of(

         next_shape = cast("tuple[int, ...]", shape[1:])

+    # ty currently cannot infer that depth is not None here
+    # when both branches of the if are taken into account
+    # the type is narrowed to be not None in both branches
+    if depth is None:
+        raise ValueError(
+            f"Could not infer depth. depth is {depth} and shape is {shape}"
+        )
+
     for item in obj:
         if depth > 1:
             if not is_sequence_of(item, types, depth=depth - 1, shape=next_shape):
diff --git a/tests/common.py b/tests/common.py
index 3d2ea62c3f11..6bd662f5308e 100644
--- a/tests/common.py
+++ b/tests/common.py
@@ -92,7 +92,7 @@ def profile(func: Callable[P, T]) -> Callable[P, T]:
     """

     def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
-        profile_filename = func.__name__ + ".prof"
+        profile_filename = getattr(func, "__name__", "unknown_function") + ".prof"
         profiler = cProfile.Profile()
         result = profiler.runcall(func, *args, **kwargs)
         profiler.dump_stats(profile_filename)
diff --git a/tests/dataset/measurement/test_measurement_context_manager.py b/tests/dataset/measurement/test_measurement_context_manager.py
index 4377b81148af..7ca77b26ecbd 100644
--- a/tests/dataset/measurement/test_measurement_context_manager.py
+++ b/tests/dataset/measurement/test_measurement_context_manager.py
@@ -297,7 +297,7 @@ def test_unregister_parameter(DAC, DMM) -> None:
     not_parameters = [DAC, DMM, 0.0, 1]
     for notparam in not_parameters:
         with pytest.raises(ValueError):
-            meas.unregister_parameter(notparam)  # pyright: ignore[reportArgumentType]
+            meas.unregister_parameter(notparam)  # type: ignore

     # unregistering something not registered should silently "succeed"
     meas.unregister_parameter("totes_not_registered")
diff --git a/tests/dataset/test_database_creation_and_upgrading.py b/tests/dataset/test_database_creation_and_upgrading.py
index ea2e198ef3b9..8ee8bfab4544 100644
--- a/tests/dataset/test_database_creation_and_upgrading.py
+++ b/tests/dataset/test_database_creation_and_upgrading.py
@@ -52,6 +52,7 @@
 from tests.common import error_caused_by, skip_if_no_fixtures
 from tests.dataset.conftest import temporarily_copied_DB

+assert tests.dataset.__file__ is not None
 fixturepath = os.sep.join(tests.dataset.__file__.split(os.sep)[:-1])
 fixturepath = os.path.join(fixturepath, "fixtures")
diff --git a/tests/dataset/test_database_extract_runs.py b/tests/dataset/test_database_extract_runs.py
index 7b0d727e8d49..af4598977c54 100644
--- a/tests/dataset/test_database_extract_runs.py
+++ b/tests/dataset/test_database_extract_runs.py
@@ -744,6 +744,7 @@ def test_old_versions_not_touched(
     _, new_v = get_db_version_and_newest_available_version(source_path)

+    assert tests.dataset.__file__ is not None
     fixturepath = os.sep.join(tests.dataset.__file__.split(os.sep)[:-1])
     fixturepath = os.path.join(
         fixturepath, "fixtures", "db_files", "version2", "some_runs.db"
     )
diff --git a/tests/dataset/test_dataset_export.py b/tests/dataset/test_dataset_export.py
index 5a377e28163f..0dbc7b7c8367 100644
--- a/tests/dataset/test_dataset_export.py
+++ b/tests/dataset/test_dataset_export.py
@@ -1579,13 +1579,13 @@ def test_multi_index_options_non_grid(mock_dataset_non_grid: DataSet) -> None:

 def test_multi_index_wrong_option(mock_dataset_non_grid: DataSet) -> None:
     with pytest.raises(ValueError, match="Invalid value for use_multi_index"):
-        mock_dataset_non_grid.to_xarray_dataset(use_multi_index=True)  # pyright: ignore[reportArgumentType]
+        mock_dataset_non_grid.to_xarray_dataset(use_multi_index=True)  # type: ignore

     with pytest.raises(ValueError, match="Invalid value for use_multi_index"):
-        mock_dataset_non_grid.to_xarray_dataset(use_multi_index=False)  # pyright: ignore[reportArgumentType]
+        mock_dataset_non_grid.to_xarray_dataset(use_multi_index=False)  # type: ignore

     with pytest.raises(ValueError, match="Invalid value for use_multi_index"):
-        mock_dataset_non_grid.to_xarray_dataset(use_multi_index="perhaps")  # pyright: ignore[reportArgumentType]
+        mock_dataset_non_grid.to_xarray_dataset(use_multi_index="perhaps")  # type: ignore


 def test_geneate_pandas_index() -> None:
diff --git a/tests/dataset/test_fix_functions.py b/tests/dataset/test_fix_functions.py
index 80155811b298..f7d95b3b5b93 100644
--- a/tests/dataset/test_fix_functions.py
+++ b/tests/dataset/test_fix_functions.py
@@ -18,6 +18,7 @@
 from tests.common import skip_if_no_fixtures
 from tests.dataset.conftest import temporarily_copied_DB

+assert tests.dataset.__file__ is not None
 fixturepath = os.sep.join(tests.dataset.__file__.split(os.sep)[:-1])
 fixturepath = os.path.join(fixturepath, "fixtures")
diff --git a/tests/dataset/test_measurement_extensions.py b/tests/dataset/test_measurement_extensions.py
index 87a342b7323f..a641ed32a047 100644
--- a/tests/dataset/test_measurement_extensions.py
+++ b/tests/dataset/test_measurement_extensions.py
@@ -331,7 +331,7 @@ def test_dond_into_fails_with_together_sweeps(

         dond_into(
             datasaver,
-            TogetherSweep(sweep1, sweep2),  # pyright: ignore [reportArgumentType]
+            TogetherSweep(sweep1, sweep2),  # type: ignore
             meas1,
         )
     _ = datasaver.dataset
@@ -352,8 +352,8 @@ def test_dond_into_fails_with_groups(default_params, default_database_and_experi
         dond_into(
             datasaver,
             sweep1,
-            [meas1],  # pyright: ignore [reportArgumentType]
-            [meas2],  # pyright: ignore [reportArgumentType]
+            [meas1],  # type: ignore
+            [meas2],  # type: ignore
         )
     _ = datasaver.dataset
diff --git a/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py
index e9fd04f2ed31..e7a2376e88b5 100644
--- a/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py
+++ b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py
@@ -98,7 +98,7 @@ def test_submodule_access_by_channel(b1500: KeysightB1500) -> None:

 def test_enable_multiple_channels(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.enable_channels([1, 2, 3])

@@ -107,7 +107,7 @@ def test_enable_multiple_channels(b1500: KeysightB1500) -> None:

 def test_disable_multiple_channels(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.disable_channels([1, 2, 3])

@@ -116,7 +116,7 @@ def test_disable_multiple_channels(b1500: KeysightB1500) -> None:

 def test_use_nplc_for_high_speed_adc(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.use_nplc_for_high_speed_adc()
     mock_write.assert_called_once_with("AIT 0,2")
@@ -129,7 +129,7 @@ def test_use_nplc_for_high_speed_adc(b1500: KeysightB1500) -> None:

 def test_use_nplc_for_high_resolution_adc(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.use_nplc_for_high_resolution_adc()
     mock_write.assert_called_once_with("AIT 1,2")
@@ -142,7 +142,7 @@ def test_use_nplc_for_high_resolution_adc(b1500: KeysightB1500) -> None:

 def test_autozero_enabled(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     assert b1500.autozero_enabled() is False

@@ -159,7 +159,7 @@ def test_autozero_enabled(b1500: KeysightB1500) -> None:

 def test_use_manual_mode_for_high_speed_adc(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.use_manual_mode_for_high_speed_adc()
     mock_write.assert_called_once_with("AIT 0,1")
@@ -177,7 +177,7 @@ def test_use_manual_mode_for_high_speed_adc(b1500: KeysightB1500) -> None:

 def test_self_calibration_successful(b1500: KeysightB1500) -> None:
     mock_ask = MagicMock()
-    b1500.ask = mock_ask
+    b1500.ask: MagicMock = mock_ask

     mock_ask.return_value = "0"

@@ -189,7 +189,7 @@ def test_self_calibration_successful(b1500: KeysightB1500) -> None:

 def test_self_calibration_failed(b1500: KeysightB1500) -> None:
     mock_ask = MagicMock()
-    b1500.ask = mock_ask
+    b1500.ask: MagicMock = mock_ask

     expected_response = CALResponse(1) + CALResponse(64)
     mock_ask.return_value = "65"
@@ -207,7 +207,7 @@ def test_error_message(b1500: KeysightB1500) -> None:

 def test_clear_timer_count(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.clear_timer_count()
     mock_write.assert_called_once_with("TSR")
@@ -220,7 +220,7 @@ def test_clear_timer_count(b1500: KeysightB1500) -> None:

 def test_set_measuremet_mode(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.set_measurement_mode(mode=constants.MM.Mode.SPOT, channels=[1, 2])
     mock_write.assert_called_once_with("MM 1,1,2")
@@ -228,7 +228,7 @@ def test_get_measurement_mode(b1500: KeysightB1500) -> None:
     mock_ask = MagicMock()
-    b1500.ask = mock_ask
+    b1500.ask: MagicMock = mock_ask

     mock_ask.return_value = "MM 1,1,2"
     measurement_mode = b1500.get_measurement_mode()
@@ -238,7 +238,7 @@ def test_get_response_format_and_mode(b1500: KeysightB1500) -> None:
     mock_ask = MagicMock()
-    b1500.ask = mock_ask
+    b1500.ask: MagicMock = mock_ask

     mock_ask.return_value = "FMT 1,1"
     measurement_mode = b1500.get_response_format_and_mode()
@@ -248,7 +248,7 @@ def test_enable_smu_filters(b1500: KeysightB1500) -> None:
     mock_write = MagicMock()
-    b1500.write = mock_write
+    b1500.write: MagicMock = mock_write

     b1500.enable_smu_filters(True)
     mock_write.assert_called_once_with("FL 1")
@@ -282,7 +282,7 @@ def test_error_message_is_called_after_setting_a_parameter(
     b1500: KeysightB1500,
 ) -> None:
     mock_ask = MagicMock()
-    b1500.ask = mock_ask
+    b1500.ask: MagicMock = mock_ask

     mock_ask.return_value = '+0,"No Error."'
     b1500.enable_smu_filters(True)
diff --git a/tests/drivers/test_tektronix_AWG70000A.py b/tests/drivers/test_tektronix_AWG70000A.py
index 004bb65ca9ca..18cfe3949f00 100644
--- a/tests/drivers/test_tektronix_AWG70000A.py
+++ b/tests/drivers/test_tektronix_AWG70000A.py
@@ -205,6 +205,7 @@ def test_seqxfile_from_fs(forged_sequence) -> None:
     # typing convenience
     make_seqx = TektronixAWG70000Base.make_SEQX_from_forged_sequence

+    assert auxfiles.__file__ is not None
     path_to_schema = auxfiles.__file__.replace("__init__.py", "awgSeqDataSets.xsd")

     with open(path_to_schema) as fid:
diff --git a/tests/validators/test_literal.py b/tests/validators/test_literal.py
index c549e8003afd..d1a88a521b3b 100644
--- a/tests/validators/test_literal.py
+++ b/tests/validators/test_literal.py
@@ -15,10 +15,10 @@ def test_literal_validator() -> None:
     a123_val.validate(1)

     with pytest.raises(ValueError, match="5 is not a member of "):
-        a123_val.validate(5, context="Outside range")  # pyright: ignore[reportArgumentType]
+        a123_val.validate(5, context="Outside range")  # type: ignore

     with pytest.raises(ValueError, match="some_str is not a member of "):
-        a123_val.validate("some_str", context="Wrong type")  # pyright: ignore[reportArgumentType]
+        a123_val.validate("some_str", context="Wrong type")  # type: ignore


 def test_literal_validator_repr() -> None: