Skip to content

Commit de35dc6

Browse files
committed
BUG: Fix SAT flag as suggested by Melanie Clarke
and fix tests. MNT: Add __all__ to ramp_fitting/utils.py DOC: Fix DQ propagation text. TST: Un-xfail test_read_pattern_saturation_flagging from #362
1 parent af54218 commit de35dc6

File tree

5 files changed

+42
-64
lines changed

5 files changed

+42
-64
lines changed

docs/stcal/ramp_fitting/description.rst

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -297,8 +297,8 @@ in the "rate" product they contain values for the exposure as a whole.
297297

298298
Data Quality Propagation
299299
------------------------
300-
For a given pixel, if all groups in an integration are flagged as DO_NOT_USE or
301-
SATURATED, then that pixel will be flagged as DO_NOT_USE in the corresponding
300+
For a given pixel, if all groups in an integration are flagged as DO_NOT_USE,
301+
then that pixel will be flagged as DO_NOT_USE in the corresponding
302302
integration in the "rateints" product. Note this does NOT mean that all groups
303303
are flagged as DO_NOT_USE. For
304304
example, slope calculations that are suppressed due to a ramp containing only
@@ -350,7 +350,7 @@ differences. Because the covariance matrix is tridiagonal, the computational
350350
complexity reduces from :math:`O(n^3)` to :math:`O(n)`. To see the detailed
351351
derivation and computations implemented, refer to the links above.
352352
The Poisson and read noise computations are based on equations (27) and (28), in
353-
`Brandt (2024) <https://iopscience.iop.org/article/10.1088/1538-3873/ad38d9>`__
353+
`Brandt (2024) <https://iopscience.iop.org/article/10.1088/1538-3873/ad38d9>`__.
354354

355355
For more details, especially for the jump detection portion in the likelihood
356356
algorithm, see

src/stcal/ramp_fitting/ramp_fit.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727

2828
BUFSIZE = 1024 * 300000 # 300Mb cache size for data section
2929

30+
__all__ = ["create_ramp_fit_class", "ramp_fit", "ramp_fit_data", "suppress_one_good_group_ramps"]
31+
3032

3133
def create_ramp_fit_class(model, algorithm, dqflags=None, suppress_one_group=False):
3234
"""
@@ -199,7 +201,8 @@ def ramp_fit_data(
199201
depending on the choice of ramp fitting algorithms (which is only ordinary
200202
least squares right now) and the choice of single or multiprocessing.
201203
202-
204+
Parameters
205+
----------
203206
ramp_data : RampData
204207
Input data necessary for computing ramp fitting.
205208
@@ -247,7 +250,11 @@ def ramp_fit_data(
247250
" but ngroups = {ngroups}. Due to this, the ramp fitting algorithm"
248251
" is being changed to OLS_C")
249252
algorithm = "OLS_C"
250-
253+
254+
# Suppress one group ramps, if desired.
255+
if ramp_data.suppress_one_group_ramps:
256+
suppress_one_good_group_ramps(ramp_data)
257+
251258
if algorithm.upper() == "LIKELY" and ngroups >= likely_fit.LIKELY_MIN_NGROUPS:
252259
image_info, integ_info, opt_info = likely_fit.likely_ramp_fit(
253260
ramp_data, readnoise_2d, gain_2d
@@ -259,10 +266,6 @@ def ramp_fit_data(
259266
nframes = ramp_data.nframes
260267
readnoise_2d *= gain_2d / np.sqrt(2.0 * nframes)
261268

262-
# Suppress one group ramps, if desired.
263-
if ramp_data.suppress_one_group_ramps:
264-
suppress_one_good_group_ramps(ramp_data)
265-
266269
# Compute ramp fitting using ordinary least squares.
267270
image_info, integ_info, opt_info = ols_fit.ols_ramp_fit_multi(
268271
ramp_data, save_opt, readnoise_2d, gain_2d, weighting, max_cores

src/stcal/ramp_fitting/utils.py

Lines changed: 5 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
LARGE_VARIANCE = 1.0e8
1313
LARGE_VARIANCE_THRESHOLD = 0.01 * LARGE_VARIANCE
1414

15+
__all__ = ["set_if_total_ramp", "set_if_total_integ", "dq_compress_sect", "dq_compress_final"]
16+
1517

1618
def set_if_total_ramp(pixeldq_sect, gdq_sect, flag, set_flag):
1719
"""
@@ -72,11 +74,10 @@ def set_if_total_integ(final_dq, integ_dq, flag, set_flag):
7274

7375
def dq_compress_sect(ramp_data, num_int, gdq_sect, pixeldq_sect):
7476
"""
75-
This sets the integration level flags for DO_NOT_USE, JUMP_DET and
76-
SATURATED. If any ramp has a jump, this flag will be set for the
77+
This sets the integration level flags for DO_NOT_USE, JUMP_DET, and
78+
SATURATED. If any ramp has a jump or is saturated, the respective flag will be set for the
7779
integration. If all groups in a ramp are flagged as DO_NOT_USE, then the
78-
integration level DO_NOT_USE flag will be set. If a ramp is saturated in
79-
group 0, then the integration level flag is marked as SATURATED. Also, if
80+
integration level DO_NOT_USE flag will be set. Also, if
8081
all groups are marked as DO_NOT_USE or SATURATED (as in suppressed one
8182
groups), then the DO_NOT_USE flag is set.
8283
@@ -107,10 +108,6 @@ def dq_compress_sect(ramp_data, num_int, gdq_sect, pixeldq_sect):
107108
# Check total SATURATED or DO_NOT_USE
108109
set_if_total_ramp(pixeldq_sect, gdq_sect, sat | dnu, dnu)
109110

110-
# Assume total saturation if group 0 is SATURATED.
111-
gdq0_sat = np.bitwise_and(gdq_sect[0], sat)
112-
pixeldq_sect[gdq0_sat != 0] = np.bitwise_or(pixeldq_sect[gdq0_sat != 0], sat | dnu)
113-
114111
# If saturation occurs mark the appropriate flag.
115112
sat_loc = np.bitwise_and(gdq_sect, sat)
116113
sat_check = np.where(sat_loc.sum(axis=0) > 0)
@@ -147,7 +144,6 @@ def dq_compress_final(dq_int, ramp_data):
147144
final_dq = np.bitwise_or(final_dq, dq_int[integ, :, :])
148145

149146
dnu = np.uint32(ramp_data.flags_do_not_use)
150-
sat = np.uint32(ramp_data.flags_saturated)
151147

152148
# Remove DO_NOT_USE because it needs special handling.
153149
# This flag is not set in the final pixel DQ array by simply being set
@@ -158,7 +154,4 @@ def dq_compress_final(dq_int, ramp_data):
158154
# If all integrations are DO_NOT_USE, then set DO_NOT_USE.
159155
set_if_total_integ(final_dq, dq_int, dnu, dnu)
160156

161-
# If all integrations have SATURATED, then set SATURATED.
162-
set_if_total_integ(final_dq, dq_int, sat, sat)
163-
164157
return final_dq

tests/test_ramp_fitting.py

Lines changed: 8 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -593,10 +593,7 @@ def test_one_group_ramp_suppressed_one_integration(algo):
593593
check = np.array([[np.nan, np.nan, 1.0000001]])
594594
np.testing.assert_allclose(sdata, check, tol)
595595

596-
if algo == DEFAULT_OLS:
597-
check = np.array([[DNU | SAT, DNU | SAT, GOOD]])
598-
else: # LIKELY
599-
check = np.array([[DNU | SAT, SAT, GOOD]])
596+
check = np.array([[DNU | SAT, DNU | SAT, GOOD]])
600597
np.testing.assert_equal(sdq, check)
601598

602599
if algo == DEFAULT_OLS:
@@ -623,10 +620,7 @@ def test_one_group_ramp_suppressed_one_integration(algo):
623620
check = np.array([[[np.nan, np.nan, 1.0000001]]])
624621
np.testing.assert_allclose(cdata, check, tol)
625622

626-
if algo == DEFAULT_OLS:
627-
check = np.array([[[DNU | SAT, DNU | SAT, GOOD]]])
628-
else: # LIKELY
629-
check = np.array([[[DNU | SAT, SAT, GOOD]]])
623+
check = np.array([[[DNU | SAT, DNU | SAT, GOOD]]])
630624
np.testing.assert_equal(cdq, check)
631625

632626
if algo == DEFAULT_OLS:
@@ -761,10 +755,7 @@ def test_one_group_ramp_suppressed_two_integrations(algo):
761755
check = np.array([[[np.nan, np.nan, 1.0000001]], [[1.0000001, 1.0000001, 1.0000001]]])
762756
np.testing.assert_allclose(cdata, check, tol)
763757

764-
if algo == DEFAULT_OLS:
765-
check = np.array([[[DNU | SAT, DNU | SAT, GOOD]], [[GOOD, GOOD, GOOD]]])
766-
else: # LIKELY
767-
check = np.array([[[DNU | SAT, SAT, GOOD]], [[GOOD, GOOD, GOOD]]])
758+
check = np.array([[[DNU | SAT, DNU | SAT, GOOD]], [[GOOD, GOOD, GOOD]]])
768759
np.testing.assert_equal(cdq, check)
769760

770761
if algo == DEFAULT_OLS:
@@ -1446,10 +1437,7 @@ def test_new_saturation(algo):
14461437
check = np.array([[2.794573, 2.793989, np.nan]])
14471438
np.testing.assert_allclose(sdata, check, tol, tol)
14481439

1449-
if algo == DEFAULT_OLS:
1450-
check = np.array([[JUMP | SAT, JUMP | SAT, DNU | SAT]])
1451-
else: # LIKELY
1452-
check = np.array([[JUMP | SAT, JUMP | DNU | SAT, DNU | SAT]])
1440+
check = np.array([[JUMP | SAT, JUMP | SAT, DNU | SAT]])
14531441
np.testing.assert_equal(sdq, check)
14541442

14551443
if algo == DEFAULT_OLS:
@@ -1566,10 +1554,7 @@ def test_invalid_integrations(algo):
15661554
check = np.array([[5576.588]])
15671555
np.testing.assert_allclose(sdata, check, tol, tol)
15681556

1569-
if algo == DEFAULT_OLS:
1570-
check = np.array([[JUMP | SAT]])
1571-
else: # LIKELY
1572-
check = np.array([[JUMP | SAT | DNU]])
1557+
check = np.array([[JUMP | SAT]])
15731558
np.testing.assert_equal(sdq, check)
15741559

15751560
if algo == DEFAULT_OLS:
@@ -1599,14 +1584,9 @@ def test_invalid_integrations(algo):
15991584
check = np.array([np.nan, np.nan, np.nan, 5576.588, np.nan, np.nan, np.nan, np.nan], dtype=np.float32)
16001585
np.testing.assert_allclose(cdata[:, 0, 0], check, tol, tol)
16011586

1602-
if algo == DEFAULT_OLS:
1603-
check = np.array(
1604-
[JUMP, JUMP | DNU, JUMP | DNU, GOOD, JUMP | DNU, JUMP | DNU, JUMP | DNU, JUMP | DNU], dtype=np.uint8
1605-
)
1606-
else: # LIKELY
1607-
check = np.array(
1608-
[JUMP, JUMP, JUMP, GOOD, JUMP, JUMP, JUMP, JUMP], dtype=np.uint8
1609-
)
1587+
check = np.array(
1588+
[JUMP, JUMP | DNU, JUMP | DNU, GOOD, JUMP | DNU, JUMP | DNU, JUMP | DNU, JUMP | DNU], dtype=np.uint8
1589+
)
16101590
check |= SAT
16111591
np.testing.assert_equal(cdq[:, 0, 0], check)
16121592

tests/test_saturation.py

Lines changed: 17 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
55
"""
66
from enum import IntEnum
7-
import pytest
7+
88
import numpy as np
99

1010
from stcal.saturation.saturation import flag_saturated_pixels
@@ -40,7 +40,6 @@ def test_basic_saturation_flagging():
4040
assert np.all(gdq[0, satindex:, 5, 5] == DQFLAGS["SATURATED"])
4141

4242

43-
@pytest.mark.xfail(reason="stcal PR#321 broke this test")
4443
def test_read_pattern_saturation_flagging():
4544
"""Check that the saturation threshold varies depending on how the reads
4645
are allocated into resultants."""
@@ -75,8 +74,11 @@ def test_read_pattern_saturation_flagging():
7574
data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS, read_pattern=read_pattern
7675
)
7776

78-
# Make sure that groups after the third get flagged
79-
assert np.all(gdq[0, 2:, 5, 5] == DQFLAGS["SATURATED"])
77+
# Make sure that groups after the third get flagged.
78+
# Ken M - PR #321 introduced this behavior, but it may not be what's wanted.
79+
# For now, just test the current behavior.
80+
assert np.all(gdq[0, 3:, 5, 5] == DQFLAGS["SATURATED"])
81+
assert gdq[0, 2, 5, 5] == DQFLAGS["DO_NOT_USE"]
8082

8183

8284
def test_read_pattern_saturation_flagging_dnu():
@@ -120,7 +122,7 @@ def test_read_pattern_saturation_flagging_dnu():
120122

121123
def test_group2_saturation_flagging_with_bias():
122124
"""Flag group 2 saturation in frame-averaged data with significant bias.
123-
125+
124126
Saturation in frame-averaged groups may not exceed the saturation threshold
125127
until after the group where a saturating CR occurs. Special rules are used
126128
for the second group of frame-averaged data. Check that the saturation
@@ -142,35 +144,35 @@ def test_group2_saturation_flagging_with_bias():
142144
# Frame 5 in group 2 has a 40000 count CR saturating the pixel
143145
# averaged over 5 frames this only looks like a 8000 count jump in group 2
144146
# but group 3 signal saturates
145-
147+
146148
bias[5, 5] = 15000
147-
149+
148150
data[0, 0, 5, 5] = 18000
149151
data[0, 1, 5, 5] = 31000
150152
data[0, 2, 5, 5] = 68000
151153
data[0, 3, 5, 5] = 73000 # Signal reaches saturation limit
152154
data[0, 4, 5, 5] = 78000
153-
154-
155+
156+
155157
# Add another pixel with bias of 15000, but no source flux
156158
# pixel counts are 15000 > sat_thresh/5, but should not be flagged as
157159
# saturated.
158160
bias[15, 15] = 15000
159-
161+
160162
data[0, :, 15, 15] = 15000
161163

162164
# Set saturation value in the saturation model
163165
satvalue = 60000
164166
sat_thresh[5, 5] = satvalue
165167
sat_thresh[15, 15] = satvalue
166-
168+
167169

168170
# set read_pattern to have 5 reads per group.
169171
read_pattern = [
170-
[1, 2, 3, 4, 5],
171-
[6, 7, 8, 9, 10],
172-
[11, 12, 13, 14, 15],
173-
[16, 17, 18, 19, 20],
172+
[1, 2, 3, 4, 5],
173+
[6, 7, 8, 9, 10],
174+
[11, 12, 13, 14, 15],
175+
[16, 17, 18, 19, 20],
174176
[21, 22, 23, 24, 25]
175177
]
176178

0 commit comments

Comments
 (0)