Remove all (including pipeline) uses of Step.__call__ (#8945)
braingram authored Nov 8, 2024
2 parents bf7699e + ba195b9 commit ffa0266
Showing 14 changed files with 173 additions and 172 deletions.
1 change: 1 addition & 0 deletions changes/8945.stpipe.rst
@@ -0,0 +1 @@
+Remove all uses of Step.__call__ to allow its deprecation.
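
The change this commit applies everywhere is mechanical: step instances are no longer invoked through the implicit Step.__call__ (calling the instance like a function) but through the explicit Step.run() method. A minimal before/after sketch for a standalone step (assumes jwst is installed; the choice of FlatFieldStep and the input filename are illustrative placeholders, not taken from this diff):

    from jwst.flatfield import FlatFieldStep

    step = FlatFieldStep()

    # Old pattern, routed through Step.__call__ and slated for deprecation:
    # result = step("jw_example_cal.fits")

    # New pattern, used throughout this commit: invoke the step explicitly.
    result = step.run("jw_example_cal.fits")
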
16 changes: 8 additions & 8 deletions jwst/master_background/master_background_mos_step.py
@@ -235,10 +235,10 @@ def _calc_master_background(self, data, user_background=None):
self.barshadow.source_type = 'EXTENDED'
self.photom.source_type = 'EXTENDED'

-pre_calibrated = self.flat_field(data)
-pre_calibrated = self.pathloss(pre_calibrated)
-pre_calibrated = self.barshadow(pre_calibrated)
-pre_calibrated = self.photom(pre_calibrated)
+pre_calibrated = self.flat_field.run(data)
+pre_calibrated = self.pathloss.run(pre_calibrated)
+pre_calibrated = self.barshadow.run(pre_calibrated)
+pre_calibrated = self.photom.run(pre_calibrated)

# Create the 1D, fully calibrated master background.
if user_background:
@@ -269,9 +269,9 @@ def _calc_master_background(self, data, user_background=None):
self.flat_field.use_correction_pars = True
self.flat_field.inverse = True

-mb_multislit = self.photom(mb_multislit)
-mb_multislit = self.barshadow(mb_multislit)
-mb_multislit = self.pathloss(mb_multislit)
-mb_multislit = self.flat_field(mb_multislit)
+mb_multislit = self.photom.run(mb_multislit)
+mb_multislit = self.barshadow.run(mb_multislit)
+mb_multislit = self.pathloss.run(mb_multislit)
+mb_multislit = self.flat_field.run(mb_multislit)

return master_background, mb_multislit
6 changes: 3 additions & 3 deletions jwst/pipeline/calwebb_ami3.py
@@ -74,7 +74,7 @@ def process(self, input):

# Do the LG analysis for this image
log.debug('Do LG processing for member %s', input_file)
-result1, result2, result3 = self.ami_analyze(input_file)
+result1, result2, result3 = self.ami_analyze.run(input_file)

# Save the averaged LG analysis results to a file
result1.meta.asn.pool_name = asn['asn_pool']
@@ -90,7 +90,7 @@

# Do the LG analysis for this image
log.debug('Do LG processing for member %s', input_file)
-result1, result2, result3 = self.ami_analyze(input_file)
+result1, result2, result3 = self.ami_analyze.run(input_file)

# Save the LG analysis results to a file
result1.meta.asn.pool_name = asn['asn_pool']
@@ -104,7 +104,7 @@
# assuming one ref star exposure per targ exposure
if (len(psf_files) > 0) & (len(targ_files) > 0):
for (targ, psf) in zip(targ_lg,psf_lg):
-result = self.ami_normalize(targ, psf)
+result = self.ami_normalize.run(targ, psf)
# Save the result
result.meta.asn.pool_name = asn['asn_pool']
result.meta.asn.table_name = op.basename(asn.filename)
12 changes: 6 additions & 6 deletions jwst/pipeline/calwebb_coron3.py
@@ -151,15 +151,15 @@ def process(self, user_input):
# Perform outlier detection on the PSFs.
if not skip_outlier_detection:
for model in psf_models:
-self.outlier_detection(model)
+self.outlier_detection.run(model)
# step may have been skipped for this model;
# turn back on for next model
self.outlier_detection.skip = False
else:
self.log.info('Outlier detection skipped for PSF\'s')

# Stack all the PSF images into a single CubeModel
-psf_stack = self.stack_refs(psf_models)
+psf_stack = self.stack_refs.run(psf_models)
psf_models.close()

# Save the resulting PSF stack
@@ -175,13 +175,13 @@

# Remove outliers from the target
if not skip_outlier_detection:
-target = self.outlier_detection(target)
+target = self.outlier_detection.run(target)
# step may have been skipped for this model;
# turn back on for next model
self.outlier_detection.skip = False

# Call align_refs
-psf_aligned = self.align_refs(target, psf_stack)
+psf_aligned = self.align_refs.run(target, psf_stack)

# Save the alignment results
self.save_model(
@@ -190,7 +190,7 @@
)

# Call KLIP
-psf_sub = self.klip(target, psf_aligned)
+psf_sub = self.klip.run(target, psf_aligned)
psf_aligned.close()

# Save the psf subtraction results
@@ -210,7 +210,7 @@
resample_library = ModelLibrary(resample_input, on_disk=False)

# Output is a single datamodel
-result = self.resample(resample_library)
+result = self.resample.run(resample_library)

# Blend the science headers
try:
34 changes: 17 additions & 17 deletions jwst/pipeline/calwebb_dark.py
@@ -67,29 +67,29 @@ def process(self, input):
# the steps are in a different order than NIR
log.debug('Processing a MIRI exposure')

-input = self.group_scale(input)
-input = self.dq_init(input)
-input = self.emicorr(input)
-input = self.saturation(input)
-input = self.ipc(input)
-input = self.firstframe(input)
-input = self.lastframe(input)
-input = self.reset(input)
-input = self.linearity(input)
-input = self.rscd(input)
+input = self.group_scale.run(input)
+input = self.dq_init.run(input)
+input = self.emicorr.run(input)
+input = self.saturation.run(input)
+input = self.ipc.run(input)
+input = self.firstframe.run(input)
+input = self.lastframe.run(input)
+input = self.reset.run(input)
+input = self.linearity.run(input)
+input = self.rscd.run(input)

else:

# process Near-IR exposures
log.debug('Processing a Near-IR exposure')

-input = self.group_scale(input)
-input = self.dq_init(input)
-input = self.saturation(input)
-input = self.ipc(input)
-input = self.superbias(input)
-input = self.refpix(input)
-input = self.linearity(input)
+input = self.group_scale.run(input)
+input = self.dq_init.run(input)
+input = self.saturation.run(input)
+input = self.ipc.run(input)
+input = self.superbias.run(input)
+input = self.refpix.run(input)
+input = self.linearity.run(input)

log.info('... ending calwebb_dark')

58 changes: 29 additions & 29 deletions jwst/pipeline/calwebb_detector1.py
@@ -89,18 +89,18 @@ def process(self, input):
# the steps are in a different order than NIR
log.debug('Processing a MIRI exposure')

-input = self.group_scale(input)
-input = self.dq_init(input)
-input = self.emicorr(input)
-input = self.saturation(input)
-input = self.ipc(input)
-input = self.firstframe(input)
-input = self.lastframe(input)
-input = self.reset(input)
-input = self.linearity(input)
-input = self.rscd(input)
-input = self.dark_current(input)
-input = self.refpix(input)
+input = self.group_scale.run(input)
+input = self.dq_init.run(input)
+input = self.emicorr.run(input)
+input = self.saturation.run(input)
+input = self.ipc.run(input)
+input = self.firstframe.run(input)
+input = self.lastframe.run(input)
+input = self.reset.run(input)
+input = self.linearity.run(input)
+input = self.rscd.run(input)
+input = self.dark_current.run(input)
+input = self.refpix.run(input)

# skip until MIRI team has figured out an algorithm
# input = self.persistence(input)
@@ -110,28 +110,28 @@
# process Near-IR exposures
log.debug('Processing a Near-IR exposure')

-input = self.group_scale(input)
-input = self.dq_init(input)
-input = self.saturation(input)
-input = self.ipc(input)
-input = self.superbias(input)
-input = self.refpix(input)
-input = self.linearity(input)
+input = self.group_scale.run(input)
+input = self.dq_init.run(input)
+input = self.saturation.run(input)
+input = self.ipc.run(input)
+input = self.superbias.run(input)
+input = self.refpix.run(input)
+input = self.linearity.run(input)

# skip persistence for NIRSpec
if instrument != 'NIRSPEC':
-input = self.persistence(input)
+input = self.persistence.run(input)

-input = self.dark_current(input)
+input = self.dark_current.run(input)

# apply the charge_migration step
-input = self.charge_migration(input)
+input = self.charge_migration.run(input)

# apply the jump step
-input = self.jump(input)
+input = self.jump.run(input)

# apply the clean_flicker_noise step
-input = self.clean_flicker_noise(input)
+input = self.clean_flicker_noise.run(input)

# save the corrected ramp data, if requested
if self.save_calibrated_ramp:
@@ -143,23 +143,23 @@
# objects, but when the step is skipped due to `skip = True`,
# only the input is returned when the step is invoked.
if self.ramp_fit.skip:
-input = self.ramp_fit(input)
+input = self.ramp_fit.run(input)
ints_model = None
else:
-input, ints_model = self.ramp_fit(input)
+input, ints_model = self.ramp_fit.run(input)

# apply the gain_scale step to the exposure-level product
if input is not None:
self.gain_scale.suffix = 'gain_scale'
-input = self.gain_scale(input)
+input = self.gain_scale.run(input)
else:
log.info("NoneType returned from ramp_fit. Gain Scale step skipped.")

# apply the gain scale step to the multi-integration product,
# if it exists, and then save it
if ints_model is not None:
self.gain_scale.suffix = 'gain_scaleints'
-ints_model = self.gain_scale(ints_model)
+ints_model = self.gain_scale.run(ints_model)
self.save_model(ints_model, 'rateints')

# setup output_file for saving
@@ -176,4 +176,4 @@ def setup_output(self, input):
if input.meta.cal_step.ramp_fit == 'COMPLETE':
self.suffix = 'rate'
else:
-self.suffix = 'ramp'
+self.suffix = 'ramp'
6 changes: 3 additions & 3 deletions jwst/pipeline/calwebb_guider.py
@@ -52,9 +52,9 @@ def process(self, input):
input = datamodels.GuiderRawModel(input)

# Apply the steps
-input = self.dq_init(input)
-input = self.guider_cds(input)
-input = self.flat_field(input)
+input = self.dq_init.run(input)
+input = self.guider_cds.run(input)
+input = self.flat_field.run(input)

log.info('... ending calwebb_guider')

10 changes: 5 additions & 5 deletions jwst/pipeline/calwebb_image2.py
@@ -150,20 +150,20 @@ def process_exposure_product(
self.bkg_subtract.save_results = True

# Call the background subtraction step
-input = self.bkg_subtract(input, members_by_type['background'])
+input = self.bkg_subtract.run(input, members_by_type['background'])

# work on slope images
-input = self.assign_wcs(input)
-input = self.flat_field(input)
-input = self.photom(input)
+input = self.assign_wcs.run(input)
+input = self.flat_field.run(input)
+input = self.photom.run(input)

# Resample individual exposures, but only if it's one of the
# regular 2D science image types
if input.meta.exposure.type.upper() in self.image_exptypes and \
len(input.data.shape) == 2:
self.resample.save_results = self.save_results
self.resample.suffix = 'i2d'
-self.resample(input)
+self.resample.run(input)

# That's all folks
self.log.info(
16 changes: 8 additions & 8 deletions jwst/pipeline/calwebb_image3.py
@@ -84,24 +84,24 @@ def process(self, input_data):
is_moving = is_moving_target(model)
input_models.shelve(model, 0, modify=False)
if is_moving:
-input_models = self.assign_mtwcs(input_models)
+input_models = self.assign_mtwcs.run(input_models)
else:
-input_models = self.tweakreg(input_models)
+input_models = self.tweakreg.run(input_models)

-input_models = self.skymatch(input_models)
-input_models = self.outlier_detection(input_models)
+input_models = self.skymatch.run(input_models)
+input_models = self.outlier_detection.run(input_models)

elif self.skymatch.skymethod == 'match':
self.log.warning("Turning 'skymatch' step off for a single "
"input image when 'skymethod' is 'match'")

else:
-input_models = self.skymatch(input_models)
+input_models = self.skymatch.run(input_models)

-result = self.resample(input_models)
+result = self.resample.run(input_models)
del input_models
if isinstance(result, datamodels.ImageModel) and result.meta.cal_step.resample == 'COMPLETE':
-self.source_catalog(result)
+self.source_catalog.run(result)


def _load_input_as_library(self, input):
@@ -128,4 +128,4 @@ def _load_input_as_library(self, input):
elif isinstance(input, datamodels.JwstDataModel):
return ModelLibrary([input], asn_exptypes=['science'], on_disk=not self.in_memory)
else:
-raise TypeError(f"Input type {type(input)} not supported.")
+raise TypeError(f"Input type {type(input)} not supported.")
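
As the commit title notes, pipeline-level uses are migrated as well; a Pipeline is itself a Step, so the same __call__ -> run() replacement applies when a pipeline is driven directly from Python. A hedged sketch (the choice of Detector1Pipeline and the filename are illustrative, not taken from this diff):

    from jwst.pipeline import Detector1Pipeline

    pipe = Detector1Pipeline()
    pipe.save_results = True

    # Old pattern, routed through Step.__call__:
    # result = pipe("jw_example_uncal.fits")

    # New pattern: call run() explicitly.
    result = pipe.run("jw_example_uncal.fits")
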
(Diffs for the remaining 5 changed files are not shown here.)
