diff --git a/CHANGES.rst b/CHANGES.rst index f28fb06a79..4bdc899d4f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,39 @@ -1.13.3 (unreleased) +1.13.4 (unreleased) =================== -- +- + + +1.13.3 (01-05-2024) +=================== + +documentation +------------- + +- Updated many docs to change the use of unordered/bullet lists to + numbered lists, to avoid formatting issues in html pages. [#8156] + +- Added arguments docs for the ``assign_wcs`` step. [#8156] + +- Added ``in_memory`` to the arguments lists in the ``outlier_detection`` + and ``resample`` steps. [#8156] + +- Added instructions to the README for setting CRDS_CONTEXT to a specific + value. [#8156] + +- Removed unused ``grow`` parameter from ``outlier_detection`` docs. [#8156] + +outlier_detection +----------------- + +- Removed the ``grow`` parameter from the step arguments, because it's no + longer used in the algorithms. [#8156] + +ramp_fitting +------------ + +- Updated the argument description and parameter definition for `maximum_cores` + to accept integer values to be passed to STCAL ramp_fit.py. [#8123] 1.13.2 (2023-12-21) =================== diff --git a/CITATION.cff b/CITATION.cff index 72e7aca8b4..e3bf06a6b3 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -75,7 +75,7 @@ authors: given-names: "Maria" orcid: "https://orcid.org/0000-0003-2314-3453" title: "JWST Calibration Pipeline" -version: 1.13.2 +version: 1.13.3 doi: 10.5281/zenodo.7038885 -date-released: 2023-12-21 +date-released: 2024-01-05 url: "https://github.com/spacetelescope/jwst" diff --git a/README.md b/README.md index 9bf73aff07..0dbb268d76 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,10 @@ ![STScI Logo](docs/_static/stsci_logo.png) -**JWST requires Python 3.9 or above and a C compiler for dependencies.** +**JWST requires a C compiler for dependencies and is currently limited to Python 3.9, 3.10 or 3.11.** + +**Until Python 3.12 is supported, fresh conda environments will require setting the + Python version to one of the three supported versions.** **Linux and MacOS platforms are tested and supported. Windows is not currently supported.** @@ -50,13 +53,13 @@ Remember that all conda operations must be done from within a bash/zsh shell. You can install the latest released version via `pip`. From a bash/zsh shell: - conda create -n python + conda create -n python=3.11 conda activate pip install jwst You can also install a specific version: - conda create -n python + conda create -n python=3.11 conda activate pip install jwst==1.9.4 @@ -65,7 +68,7 @@ You can also install a specific version: You can install the latest development version (not as well tested) from the Github master branch: - conda create -n python + conda create -n python=3.11 conda activate pip install git+https://github.com/spacetelescope/jwst @@ -117,7 +120,7 @@ already installed with released versions of the `jwst` package. As usual, the first two steps are to create and activate an environment: - conda create -n python + conda create -n python=3.11 conda activate To install your own copy of the code into that environment, you first need to @@ -170,6 +173,11 @@ two environment variables: ```` can be any the user has permissions to use, such as `$HOME`. Expect to use upwards of 200GB of disk space to cache the latest couple of contexts. 
+To use a specific CRDS context, other than the current default, set the ``CRDS_CONTEXT`` +environment variable: + + export CRDS_CONTEXT=jwst_1179.pmap + ## Documentation Documentation (built daily from the Github `master` branch) is available at: @@ -210,6 +218,7 @@ the specified context and less than the context for the next release. | jwst tag | DMS build | SDP_VER | CRDS_CONTEXT | Released | Ops Install | Notes | |---------------------|-----------|----------|--------------|------------|-------------|-----------------------------------------------| +| 1.13.3 | B10.1rc4 | 2023.4.0 | 1181 | 2024-01-05 | | Fourth release candidate for B10.1 | | 1.13.2 | B10.1rc3 | 2023.4.0 | 1181 | 2023-12-21 | | Third release candidate for B10.1 | | 1.13.1 | B10.1rc2 | 2023.4.0 | 1181 | 2023-12-19 | | Second release candidate for B10.1 | | 1.13.0 | B10.1rc1 | 2023.4.0 | 1179 | 2023-12-15 | | First release candidate for B10.1 | diff --git a/docs/jwst/assign_wcs/arguments.rst b/docs/jwst/assign_wcs/arguments.rst new file mode 100644 index 0000000000..9ec3b7af9b --- /dev/null +++ b/docs/jwst/assign_wcs/arguments.rst @@ -0,0 +1,32 @@ +Step Arguments +============== + +The ``assign_wcs`` step has the following optional arguments to control +the behavior of the processing. + +``--sip_approx`` (boolean, default=True) + A flag to enable the computation of a SIP approximation for + imaging modes. + +``--sip_degree`` (integer, max=6, default=None) + Polynomial degree for the forward SIP fit. "None" uses the best fit. + +``--sip_max_pix_error`` (float, default=0.1) + Maximum error for the SIP forward fit, in units of pixels. Ignored if + ``sip_degree`` is set to an explicit value. + +``--sip_inv_degree`` (integer, max=6, default=None) + Polynomial degree for the inverse SIP fit. "None" uses the best fit. + +``--sip_max_inv_pix_error`` (float, default=0.1) + Maximum error for the SIP inverse fit, in units of pixels. Ignored if + ``sip_inv_degree`` is set to an explicit value. + +``--sip_npoints`` (integer, default=12) + Number of points for the SIP fit. + +``--slit_y_low`` (float, default=-0.55) + Lower edge of a NIRSpec slit. + +``--slit_y_high`` (float, default=0.55) + Upper edge of a NIRSpec slit. diff --git a/docs/jwst/assign_wcs/index.rst b/docs/jwst/assign_wcs/index.rst index 5d119ca465..b176d180ab 100644 --- a/docs/jwst/assign_wcs/index.rst +++ b/docs/jwst/assign_wcs/index.rst @@ -8,6 +8,7 @@ Assign WCS :maxdepth: 1 main.rst + arguments.rst reference_files.rst asdf-howto.rst exp_types.rst diff --git a/docs/jwst/background_step/description.rst b/docs/jwst/background_step/description.rst index aa2cacb769..183c8e1951 100644 --- a/docs/jwst/background_step/description.rst +++ b/docs/jwst/background_step/description.rst @@ -35,42 +35,42 @@ image depends on whether the background exposures are "rate" (2D) or "rateint" (3D) exposures. In the case of "rate" exposures, the average background image is produced as follows: - * Clip the combined SCI arrays of all background exposures. For mixtures - of full chip and subarray data, only overlapping regions are used - * Compute the mean of the unclipped SCI values - * Sum in quadrature the ERR arrays of all background exposures, clipping the +#. Clip the combined SCI arrays of all background exposures. For mixtures + of full chip and subarray data, only overlapping regions are used +#. Compute the mean of the unclipped SCI values +#. 
Sum in quadrature the ERR arrays of all background exposures, clipping the same input values as determined for the SCI arrays, and convert the result to an uncertainty in the mean - * Combine the DQ arrays of all background exposures using a bitwise OR +#. Combine the DQ arrays of all background exposures using a bitwise OR operation In the case of "rateint" exposures, each background exposure can have multiple integrations, so calculations are slightly more involved. The "overall" average background image is produced as follows: - * Clip the SCI arrays of each background exposure along its integrations - * Compute the mean of the unclipped SCI values to yield an average image for +#. Clip the SCI arrays of each background exposure along its integrations +#. Compute the mean of the unclipped SCI values to yield an average image for each background exposure - * Clip the means of all background exposure averages - * Compute the mean of the unclipped background exposure averages to yield the +#. Clip the means of all background exposure averages +#. Compute the mean of the unclipped background exposure averages to yield the "overall" average background image - * Sum in quadrature the ERR arrays of all background exposures, clipping the +#. Sum in quadrature the ERR arrays of all background exposures, clipping the same input values as determined for the SCI arrays, and convert the result to an uncertainty in the mean (This is not yet implemented) - * Combine the DQ arrays of all background exposures, by first using a bitwise +#. Combine the DQ arrays of all background exposures, by first using a bitwise OR operation over all integrations in each exposure, followed by doing by a bitwise OR operation over all exposures. The average background exposure is then subtracted from the target exposure. The subtraction consists of the following operations: - * The SCI array of the average background is subtracted from the SCI +#. The SCI array of the average background is subtracted from the SCI array of the target exposure - * The ERR array of the target exposure is currently unchanged, until full +#. The ERR array of the target exposure is currently unchanged, until full error propagation is implemented in the entire pipeline - * The DQ arrays of the average background and the target exposure are +#. The DQ arrays of the average background and the target exposure are combined using a bitwise OR operation If the target exposure is a simple ImageModel, the background image is diff --git a/docs/jwst/cube_build/main.rst b/docs/jwst/cube_build/main.rst index 855e53af3e..071925e9f0 100644 --- a/docs/jwst/cube_build/main.rst +++ b/docs/jwst/cube_build/main.rst @@ -11,13 +11,13 @@ spatial and one spectral. The ``cube_build`` step can accept several different forms of input data, including: - - a single file containing a 2-D IFU image +#. A single file containing a 2-D IFU image - - a data model (IFUImageModel) containing a 2-D IFU image +#. A data model (`~jwst.datamodels.IFUImageModel`) containing a 2-D IFU image - - an association table (in json format) containing a list of input files +#. An association table (in json format) containing a list of input files - - a model container with several 2-D IFU data models +#. 
A model container with several 2-D IFU data models There are a number of arguments the user can provide either in a parameter file or on the command line that control the sampling size of the cube, as well as the type of data @@ -98,8 +98,8 @@ We use the following terminology to define the spectral range divisions of MIRI: example, the shortest wavelength range on MIRI is covered by Band 1-SHORT (aka 1A) and the longest is covered by Band 4-LONG (aka 4C). - For **NIRSpec** we define a *band* as a single grating-filter combination, e.g. G140M-F070LP. The possible grating/filter - combinations for NIRSpec are given in the table below. + For **NIRSpec** we define a *band* as a single grating-filter combination, e.g. G140M-F070LP. The possible grating/filter + combinations for NIRSpec are given in the table below. NIRSpec IFU Disperser and Filter Combinations +++++++++++++++++++++++++++++++++++++++++++++ @@ -355,7 +355,7 @@ user with the options: ``rois`` and ``roiw``. If *n* point cloud members are located within the ROI of a voxel, the voxel flux K = :math:`\frac{ \sum_{i=1}^n Flux_i w_i}{\sum_{i=1}^n w_i}` -where the weighting ``weighting=emsm`` is +where the weighting ``weighting=emsm`` is: :math:`w_i =e\frac{ -({xnormalized}_i^2 + {ynormalized}_i^2 + {znormalized}_i^2)} {scale factor}` diff --git a/docs/jwst/dark_current/description.rst b/docs/jwst/dark_current/description.rst index c9612c9d88..cc433f4c84 100644 --- a/docs/jwst/dark_current/description.rst +++ b/docs/jwst/dark_current/description.rst @@ -35,9 +35,9 @@ GROUPGAP intervening frames. The frame-averaged dark is constructed using the following scheme: -* SCI arrays are computed as the mean of the original dark SCI arrays -* ERR arrays are computed as the uncertainty in the mean, using - :math:`\frac{\sqrt {\sum \mathrm{ERR}^2}}{nframes}` +#. SCI arrays are computed as the mean of the original dark SCI arrays +#. ERR arrays are computed as the uncertainty in the mean, using + :math:`\frac{\sqrt {\sum \mathrm{ERR}^2}}{nframes}` The dark reference data are not integration-dependent for most instruments, hence the same group-by-group dark current data are subtracted from every diff --git a/docs/jwst/dq_init/description.rst b/docs/jwst/dq_init/description.rst index 5abcc8fa56..23d72aec51 100644 --- a/docs/jwst/dq_init/description.rst +++ b/docs/jwst/dq_init/description.rst @@ -13,20 +13,20 @@ integrations for a given pixel. The actual process consists of the following steps: - - Determine what MASK reference file to use via the interface to the bestref +#. Determine what MASK reference file to use via the interface to the bestref utility in CRDS. - - If the "PIXELDQ" or "GROUPDQ" arrays of the input dataset do not already exist, +#. If the "PIXELDQ" or "GROUPDQ" arrays of the input dataset do not already exist, which is sometimes the case for raw input products, create these arrays in the input data model and initialize them to zero. The "PIXELDQ" array will be 2D, with the same number of rows and columns as the input science data. The "GROUPDQ" array will be 4D with the same dimensions (nints, ngroups, nrows, ncols) as the input science data array. - - Check to see if the input science data is in subarray mode. If so, extract a +#. Check to see if the input science data is in subarray mode. If so, extract a matching subarray from the full-frame MASK reference file. - - Propagate the DQ flags from the reference file DQ array to the science data "PIXELDQ" +#. 
Propagate the DQ flags from the reference file DQ array to the science data "PIXELDQ" array using numpy's ``bitwise_or`` function. Note that when applying the ``dq_init`` step to FGS guide star data, as is done in diff --git a/docs/jwst/extract_1d/description.rst b/docs/jwst/extract_1d/description.rst index 7f02f36658..a4adc36b06 100644 --- a/docs/jwst/extract_1d/description.rst +++ b/docs/jwst/extract_1d/description.rst @@ -179,28 +179,28 @@ each column (or row, if dispersion is vertical), using pixel values from all background regions within each column (or row). Parameters related to background subtraction are ``smoothing_length``, -``bkg_fit``, and ``bkg_order``. - -* If ``smoothing_length`` is specified, the 2D image data used to perform - background extraction will be smoothed along the dispersion direction using - a boxcar of width ``smoothing_length`` (in pixels). If not specified, no - smoothing of the input 2D image data is performed. - -* ``bkg_fit`` specifies the type of background computation to be performed - within each column (or row). The default value is None; if not set by - the user, the step will search the reference file for a value. If no value - is found, ``bkg_fit`` will be set to "poly". The "poly" mode fits a - polynomial of order ``bkg_order`` to the background values within - the column (or row). Alternatively, values of "mean" or "median" can be - specified in order to compute the simple mean or median of the background - values in each column (or row). Note that using "bkg_fit=mean" is - mathematically equivalent to "bkg_fit=poly" with "bkg_order=0". If ``bkg_fit`` - is provided both by a reference file and by the user, e.g. - ``steps.extract_1d.bkg_fit='poly'``, the user-supplied value will override - the reference file value. - -* If ``bkg_fit=poly`` is specified, ``bkg_order`` is used to indicate the - polynomial order to be used. The default value is zero, i.e. a constant. +``bkg_fit``, and ``bkg_order``: + +#. If ``smoothing_length`` is specified, the 2D image data used to perform + background extraction will be smoothed along the dispersion direction using + a boxcar of width ``smoothing_length`` (in pixels). If not specified, no + smoothing of the input 2D image data is performed. + +#. ``bkg_fit`` specifies the type of background computation to be performed + within each column (or row). The default value is None; if not set by + the user, the step will search the reference file for a value. If no value + is found, ``bkg_fit`` will be set to "poly". The "poly" mode fits a + polynomial of order ``bkg_order`` to the background values within + the column (or row). Alternatively, values of "mean" or "median" can be + specified in order to compute the simple mean or median of the background + values in each column (or row). Note that using "bkg_fit=mean" is + mathematically equivalent to "bkg_fit=poly" with "bkg_order=0". If ``bkg_fit`` + is provided both by a reference file and by the user, e.g. + ``steps.extract_1d.bkg_fit='poly'``, the user-supplied value will override + the reference file value. + +#. If ``bkg_fit=poly`` is specified, ``bkg_order`` is used to indicate the + polynomial order to be used. The default value is zero, i.e. a constant. 
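+
+As an illustrative sketch only (the input file name below is a placeholder,
+not a real product), these background parameters can be overridden on the
+command line when the step is run as part of the ``calwebb_spec2``
+pipeline, for example::
+
+    strun calwebb_spec2 jw00001001001_01101_00001_nrs1_rate.fits \
+        --steps.extract_1d.smoothing_length=11 \
+        --steps.extract_1d.bkg_fit=median
+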
During source extraction, the background fit is evaluated at each pixel within the source extraction region for that column (row), and the fitted values will diff --git a/docs/jwst/flatfield/main.rst b/docs/jwst/flatfield/main.rst index 5187cd38f3..3cd4111764 100644 --- a/docs/jwst/flatfield/main.rst +++ b/docs/jwst/flatfield/main.rst @@ -24,35 +24,35 @@ modes included in this category are NIRCam WFSS and Time-Series Grism, NIRISS WFSS and SOSS, and MIRI MRS and LRS. All of these modes are processed as follows: -- If the science data have been taken using a subarray and the FLAT - reference file is a full-frame image, extract the corresponding subarray - region from the flat-field data. +#. If the science data have been taken using a subarray and the FLAT + reference file is a full-frame image, extract the corresponding subarray + region from the flat-field data. -- Find pixels that have a value of NaN or zero in the FLAT reference file - SCI array and set their DQ values to "NO_FLAT_FIELD" and "DO_NOT_USE." +#. Find pixels that have a value of NaN or zero in the FLAT reference file + SCI array and set their DQ values to "NO_FLAT_FIELD" and "DO_NOT_USE." -- Reset the values of pixels in the flat that have DQ="NO_FLAT_FIELD" to - 1.0, so that they have no effect when applied to the science data. +#. Reset the values of pixels in the flat that have DQ="NO_FLAT_FIELD" to + 1.0, so that they have no effect when applied to the science data. -- Propagate the FLAT reference file DQ values into the science exposure - DQ array using a bitwise OR operation. +#. Propagate the FLAT reference file DQ values into the science exposure + DQ array using a bitwise OR operation. -- Apply the flat according to: +#. Apply the flat according to: - .. math:: - SCI_{science} = SCI_{science} / SCI_{flat} + .. math:: + SCI_{science} = SCI_{science} / SCI_{flat} - .. math:: - VAR\_POISSON_{science} = VAR\_POISSON_{science} / SCI_{flat}^2 + .. math:: + VAR\_POISSON_{science} = VAR\_POISSON_{science} / SCI_{flat}^2 - .. math:: - VAR\_RNOISE_{science} = VAR\_RNOISE_{science} / SCI_{flat}^2 + .. math:: + VAR\_RNOISE_{science} = VAR\_RNOISE_{science} / SCI_{flat}^2 - .. math:: - VAR\_FLAT_{science} = ( SCI_{science}^{2} / SCI_{flat}^{2} ) * ERR_{flat}^{2} + .. math:: + VAR\_FLAT_{science} = ( SCI_{science}^{2} / SCI_{flat}^{2} ) * ERR_{flat}^{2} - .. math:: - ERR_{science} = \sqrt{VAR\_POISSON + VAR\_RNOISE + VAR\_FLAT} + .. math:: + ERR_{science} = \sqrt{VAR\_POISSON + VAR\_RNOISE + VAR\_FLAT} Multi-integration datasets ("_rateints.fits" products), which are common for modes like NIRCam Time-Series Grism, NIRISS SOSS, and MIRI LRS Slitless, diff --git a/docs/jwst/jump/description.rst b/docs/jwst/jump/description.rst index 3544351fbd..7ed340bb2c 100644 --- a/docs/jwst/jump/description.rst +++ b/docs/jwst/jump/description.rst @@ -40,34 +40,34 @@ Two-Point Difference Method ^^^^^^^^^^^^^^^^^^^^^^^^^^^ The two-point difference method is applied to each integration as follows: -* Compute the first differences for each pixel (the difference between - adjacent groups) -* Compute the clipped (dropping the largest difference) median of the first differences for each pixel. -* Use the median to estimate the Poisson noise for each group and combine it - with the read noise to arrive at an estimate of the total expected noise for - each difference. -* Compute the "difference ratio" as the difference between the first differences - of each group and the median, divided by the expected noise. 
-* If the largest "difference ratio" is greater than the rejection threshold, - flag the group corresponding to that ratio as having a jump. -* If a jump is found in a given pixel, iterate the above steps with the - jump-impacted group excluded, looking for additional lower-level jumps - that still exceed the rejection threshold. -* Stop iterating on a given pixel when no new jumps are found or only one - difference remains. -* If the there are only three differences (four groups), the standard median - is used rather than the clipped median. -* If there are only two differences (three groups), the smallest one is compared to the larger - one and if the larger one is above a threshold, it is flagged as a jump. - -* If flagging of the 4 neighbors is requested, then the 4 adjacent pixels will - have ramp jumps flagged in the same group as the central pixel as long as it has - a jump between the min and max requested levels for this option. - -* If flagging of groups after a ramp jump is requested, then the groups in the - requested time since a detected ramp jump will be flagged as ramp jumps if - the ramp jump is above the requested threshold. Two thresholds and times are - possible for this option. +#. Compute the first differences for each pixel (the difference between + adjacent groups) +#. Compute the clipped (dropping the largest difference) median of the first differences for each pixel. +#. Use the median to estimate the Poisson noise for each group and combine it + with the read noise to arrive at an estimate of the total expected noise for + each difference. +#. Compute the "difference ratio" as the difference between the first differences + of each group and the median, divided by the expected noise. +#. If the largest "difference ratio" is greater than the rejection threshold, + flag the group corresponding to that ratio as having a jump. +#. If a jump is found in a given pixel, iterate the above steps with the + jump-impacted group excluded, looking for additional lower-level jumps + that still exceed the rejection threshold. +#. Stop iterating on a given pixel when no new jumps are found or only one + difference remains. +#. If the there are only three differences (four groups), the standard median + is used rather than the clipped median. +#. If there are only two differences (three groups), the smallest one is compared to the larger + one and if the larger one is above a threshold, it is flagged as a jump. + +#. If flagging of the 4 neighbors is requested, then the 4 adjacent pixels will + have ramp jumps flagged in the same group as the central pixel as long as it has + a jump between the min and max requested levels for this option. + +#. If flagging of groups after a ramp jump is requested, then the groups in the + requested time since a detected ramp jump will be flagged as ramp jumps if + the ramp jump is above the requested threshold. Two thresholds and times are + possible for this option. Note that any ramp values flagged as SATURATED in the input GROUPDQ array are not used in any of the above calculations and hence will never be @@ -106,7 +106,13 @@ surrounded by a halo of pixels that have a low level of excess counts. These excess counts are, in general, below the detection threshold of normal cosmic rays. -To constrain the effect of this halo the jump step will fit ellipses or circles that enclose the large events and expand the ellipses and circles by the input expansion_factor and mark them as jump. 
+To constrain the effect of this halo, the jump step will fit ellipses or circles that +enclose the large events and expand the ellipses and circles by the input expansion_factor +and mark them as jump. -The two types of detectors respond differently. The large events in the near infrared detectors are almost always circles with a central region that is saturated. The saturated core allows the search for smaller events without false positives. -The MIRI detectors do not, in general, have a saturated center and are only rarely circular. Thus, we fit the minimum enclosing ellipse and do not require that there are saturated pixels within the ellipse. +The two types of detectors respond differently. The large events in the near-infrared +detectors are almost always circles with a central region that is saturated. +The saturated core allows the search for smaller events without false positives. +The MIRI detectors do not, in general, have a saturated center and are only rarely circular. +Thus, we fit the minimum enclosing ellipse and do not require that there are saturated pixels +within the ellipse. diff --git a/docs/jwst/linearity/description.rst b/docs/jwst/linearity/description.rst index fa86277707..90f6466fcf 100644 --- a/docs/jwst/linearity/description.rst +++ b/docs/jwst/linearity/description.rst @@ -50,18 +50,18 @@ set to "COMPLETE". Special Handling ++++++++++++++++ -- Pixels having at least one correction coefficient equal to NaN will not have - the linearity correction applied and the DQ flag "NO_LIN_CORR" is added to - the science exposure PIXELDQ array. - -- Pixels that have the "NO_LIN_CORR" flag set in the DQ array of the linearity - reference file will not have the correction applied and the "NO_LIN_CORR" flag - is added to the science exposure PIXELDQ array. - -- Pixel values that have the "SATURATED" flag set in a particular group of the - science exposure GROUPDQ array will not have the linearity correction - applied to that group. Any groups for that pixel that are not flagged as - saturated will be corrected. +#. Pixels having at least one correction coefficient equal to NaN will not have + the linearity correction applied and the DQ flag "NO_LIN_CORR" is added to + the science exposure PIXELDQ array. + +#. Pixels that have the "NO_LIN_CORR" flag set in the DQ array of the linearity + reference file will not have the correction applied and the "NO_LIN_CORR" flag + is added to the science exposure PIXELDQ array. + +#. Pixel values that have the "SATURATED" flag set in a particular group of the + science exposure GROUPDQ array will not have the linearity correction + applied to that group. Any groups for that pixel that are not flagged as + saturated will be corrected. The ERR array of the input science exposure is not modified. diff --git a/docs/jwst/master_background/description.rst b/docs/jwst/master_background/description.rst index 2a34cae996..a436ecdc1d 100644 --- a/docs/jwst/master_background/description.rst +++ b/docs/jwst/master_background/description.rst @@ -214,22 +214,22 @@ each containing data from multiple slits, the subtraction is applied one-by-one instances in all exposures. For each data instance to be subtracted the following steps are performed: -- Compute a 2-D wavelength grid corresponding to the 2-D source data. For some observing modes, - such as NIRSpec MOS and fixed-slit, a 2-D wavelength array is already computed and attached to the data - in the :ref:`calwebb_spec2 ` pipeline :ref:`extract_2d ` step. 
- If such a wavelength array is present, it is used. For modes that don't have a 2-D - wavelength array contained in the data, it is computed on the fly using the WCS object - for each source data instance. - -- Compute the background signal at each pixel in the 2-D wavelength grid by interpolating within - the 1-D master background spectrum as a function of wavelength. - Pixels in the 2-D source data with an undefined wavelength (e.g. wavelength array value - of NaN) or a wavelength that is beyond the limits of the master background spectrum receive - special handling. The interpolated background value is set to zero and a DQ flag of - "DO_NOT_USE" is set. - -- Subtract the resulting 2-D background image from the 2-D source data. DQ values from the - 2-D background image are propagated into the DQ array of the subtracted science data. +#. Compute a 2-D wavelength grid corresponding to the 2-D source data. For some observing modes, + such as NIRSpec MOS and fixed-slit, a 2-D wavelength array is already computed and attached to the data + in the :ref:`calwebb_spec2 ` pipeline :ref:`extract_2d ` step. + If such a wavelength array is present, it is used. For modes that don't have a 2-D + wavelength array contained in the data, it is computed on the fly using the WCS object + for each source data instance. + +#. Compute the background signal at each pixel in the 2-D wavelength grid by interpolating within + the 1-D master background spectrum as a function of wavelength. + Pixels in the 2-D source data with an undefined wavelength (e.g. wavelength array value + of NaN) or a wavelength that is beyond the limits of the master background spectrum receive + special handling. The interpolated background value is set to zero and a DQ flag of + "DO_NOT_USE" is set. + +#. Subtract the resulting 2-D background image from the 2-D source data. DQ values from the + 2-D background image are propagated into the DQ array of the subtracted science data. .. _nirspec_modes: @@ -241,19 +241,19 @@ operations that need to be applied to accommodate some of the unique calibration applied to NIRSpec data. NIRSpec MOS mode requires even more special handling. This is due to two primary effects of NIRSpec calibration: -- Point sources in MOS and Fixed-Slit mode receive wavelength offset - corrections if the source is not centered (along the dispersion direction) within the slit. - Hence the wavelength grid assigned to each 2-D slit cutout can be shifted slightly relative - to the wavelengths of the background signal contained in the same cutout. And because the - flat-field, pathloss, and photom corrections/calibrations are wavelength-dependent, the - pixel-level calibrations for the source signal are slightly different than the background. +#. Point sources in MOS and Fixed-Slit mode receive wavelength offset + corrections if the source is not centered (along the dispersion direction) within the slit. + Hence the wavelength grid assigned to each 2-D slit cutout can be shifted slightly relative + to the wavelengths of the background signal contained in the same cutout. And because the + flat-field, pathloss, and photom corrections/calibrations are wavelength-dependent, the + pixel-level calibrations for the source signal are slightly different than the background. -- Point sources and uniform sources receive different pathloss and bar shadow corrections - (in fact point sources don't receive any bar shadow correction). 
So the background signal - contained within a calibrated point source cutout has received a different pathloss - correction and hasn't received any bar shadow correction. Meanwhile, the master background - is created from data that had corrections for a uniform source applied to it and hence - there's a mismatch relative to the point source data. +#. Point sources and uniform sources receive different pathloss and bar shadow corrections + (in fact point sources don't receive any bar shadow correction). So the background signal + contained within a calibrated point source cutout has received a different pathloss + correction and hasn't received any bar shadow correction. Meanwhile, the master background + is created from data that had corrections for a uniform source applied to it and hence + there's a mismatch relative to the point source data. The 2-D background that's initially created from the 1-D master background is essentially a perfectly calibrated background signal. However, due to the effects mentioned above, the @@ -371,28 +371,28 @@ The detailed list of operations performed when applying master background subtraction to MOS data during :ref:`calwebb_spec2 ` processing is as follows: -1) Process all slitlets in the MOS exposure up through the +#. Process all slitlets in the MOS exposure up through the :ref:`extract_2d ` and :ref:`srctype ` steps -2) The `master_background_mos` step temporarily applies remaining calibration +#. The `master_background_mos` step temporarily applies remaining calibration steps up through :ref:`photom ` to all slits, treating them all as extended sources (appropriate for background signal), and saving the extended source correction arrays for each slit in an internal copy of the data model -3) If a user-supplied master background spectrum is **not** given, the +#. If a user-supplied master background spectrum is **not** given, the :ref:`resample_spec ` and :ref:`extract_1d ` steps are applied to the calibrated background slits, resulting in extracted 1D background spectra -4) The 1D background spectra are combined, using the +#. The 1D background spectra are combined, using the :ref:`combine_1d ` step, into a master background spectrum -5) If a user-supplied master background **is** given, steps 3 and 4 are skipped and +#. If a user-supplied master background **is** given, steps 3 and 4 are skipped and the user-supplied spectrum is inserted into the processing flow -6) The master background spectrum (either user-supplied or created on-the-fly) is +#. The master background spectrum (either user-supplied or created on-the-fly) is expanded into the 2D space of each slit -7) The 2D background "image" for each slit is processed in **inverse** mode through +#. The 2D background "image" for each slit is processed in **inverse** mode through the :ref:`photom `, :ref:`barshadow `, :ref:`pathloss `, and :ref:`flatfield ` steps, using the correction arrays that were computed in step 2, so that the background data now matches the partially calibrated background signal in each slit -8) The corrected 2D background is subtracted from each slit -9) The background-subtracted slits are processed through all remaining +#. The corrected 2D background is subtracted from each slit +#. 
The background-subtracted slits are processed through all remaining :ref:`calwebb_spec2 ` calibration steps, using the corrections appropriate for the source type in each slit diff --git a/docs/jwst/outlier_detection/arguments.rst b/docs/jwst/outlier_detection/arguments.rst index 889866fbaf..a7b462dd8e 100644 --- a/docs/jwst/outlier_detection/arguments.rst +++ b/docs/jwst/outlier_detection/arguments.rst @@ -36,9 +36,6 @@ that control the behavior of the processing: The percent of maximum weight to use as lower-limit for valid data; valid values go from 0.0 to 1.0. -``--grow`` (integer, default=1) - The radius, in pixels, from a bad pixel for neighbor rejection. - ``--snr`` (string, default='4.0 3.0') The signal-to-noise values to use for bad pixel identification. Valid values are a pair of floating-point values in a single string. @@ -77,6 +74,10 @@ that control the behavior of the processing: For example, if set to ``0.5``, only resampled images that use less than half the available memory can be created. +``--in_memory`` (boolean, default=False) + Specifies whether or not to load and create all images that are used during + processing into memory. If ``False``, input files are loaded from disk when + needed and all intermediate files are stored on disk, rather than in memory. Step Arguments for IFU data =========================== @@ -94,3 +95,8 @@ that control the behavior of the processing: ``--save_intermediate_results`` (boolean, default=False) Specifies whether or not to save any intermediate products created during step processing. + +``--in_memory`` (boolean, default=False) + Specifies whether or not to load and create all images that are used during + processing into memory. If ``False``, input files are loaded from disk when + needed and all intermediate files are stored on disk, rather than in memory. diff --git a/docs/jwst/outlier_detection/main.rst b/docs/jwst/outlier_detection/main.rst index 7b17684707..a0f8291f83 100644 --- a/docs/jwst/outlier_detection/main.rst +++ b/docs/jwst/outlier_detection/main.rst @@ -10,43 +10,42 @@ were not detectable by the :ref:`jump ` step. The ``outlier_detection implements the following algorithm to identify and flag any remaining cosmic-rays or other artifacts left over from previous calibrations: - - build a stack of input data +#. build a stack of input data - - all inputs will need to have the same WCS since outlier detection assumes - the same flux for each point on the sky, and variations from one image to - the next would indicate a problem with the detector during readout of that - pixel - - if needed, each input will be resampled to a common output WCS + - all inputs will need to have the same WCS since outlier detection assumes + the same flux for each point on the sky, and variations from one image to + the next would indicate a problem with the detector during readout of that pixel + - if needed, each input will be resampled to a common output WCS - - create a median image from the stack of input data +#. create a median image from the stack of input data - - this median operation will ignore any input pixels which have a weight - which is too low (<70% max weight) + - this median operation will ignore any input pixels which have a weight + which is too low (<70% max weight) - - create "blotted" data from the median image to exactly match each original - input dataset +#. 
create "blotted" data from the median image to exactly match each original + input dataset - - perform a statistical comparison (pixel-by-pixel) between the median blotted - data with the original input data to look for pixels with values that are - different from the mean value by more than some specified sigma - based on the noise model +#. perform a statistical comparison (pixel-by-pixel) between the median blotted + data with the original input data to look for pixels with values that are + different from the mean value by more than some specified sigma + based on the noise model - - the noise model used relies on the error array computed by previous - calibration steps based on the readnoise and calibration errors + - the noise model used relies on the error array computed by previous + calibration steps based on the readnoise and calibration errors - - flag the DQ array for the input data for any pixel (or affected neighboring - pixels) identified as a statistical outlier +#. flag the DQ array for the input data for any pixel (or affected neighboring + pixels) identified as a statistical outlier The outlier detection step serves as a single interface to apply this general process to any JWST data, with specific variations of this algorithm for each type of data. Sub-classes of the outlier detection algorithm have been developed -specifically for +specifically for: - - Imaging data - - IFU spectroscopic data - - TSO data - - coronagraphic data - - spectroscopic data +#. Imaging data +#. IFU spectroscopic data +#. TSO data +#. coronagraphic data +#. spectroscopic data This allows the outlier_detection step to be tuned to the variations in each type of JWST data. diff --git a/docs/jwst/outlier_detection/outlier_detection.rst b/docs/jwst/outlier_detection/outlier_detection.rst index d443a9b6bd..ae4d870753 100644 --- a/docs/jwst/outlier_detection/outlier_detection.rst +++ b/docs/jwst/outlier_detection/outlier_detection.rst @@ -72,9 +72,6 @@ Specifically, this routine performs the following operations: * The ``maskpt`` parameter sets the percentage of the weight image values to use, and any pixel with a weight below this value gets flagged as "bad" and ignored when resampled. - * The ``grow`` parameter sets the width, in pixels, beyond the limit set by - the rejection algorithm being used, for additional pixels to be rejected in - an image. * The median image is written out to disk as `__median.fits` by default. #. By default, the median image is blotted back (inverse of resampling) to diff --git a/docs/jwst/outlier_detection/outlier_detection_spec.rst b/docs/jwst/outlier_detection/outlier_detection_spec.rst index a3ee88e0fc..c1570c7257 100644 --- a/docs/jwst/outlier_detection/outlier_detection_spec.rst +++ b/docs/jwst/outlier_detection/outlier_detection_spec.rst @@ -11,45 +11,40 @@ spectroscopic observations. 
Specifically, this routine performs the following operations (modified from the :ref:`Default Outlier Detection Algorithm ` ): -* Extract parameter settings from input model and merge them with any user-provided values - - - the same set of parameters available to: - ref:`Default Outlier Detection Algorithm ` - also applies to this code - -* Convert input data, as needed, to make sure it is in a format that can be processed - - - A :py:class:`~jwst.datamodels.ModelContainer` serves as the basic format - for all processing performed by - this step, as each entry will be treated as an element of a stack of images - to be processed to identify bad pixels, cosmic-rays and other artifacts - - If the input data is a :py:class:`~jwst.datamodels.CubeModel`, convert it into a - :py:class:`~jwst.datamodels.ModelContainer`. - This allows each plane of the cube to be treated as a separate 2D image - for resampling (if done) and for combining into a median image. - -* Resample all input images into a :py:class:`~jwst.datamodels.ModelContainer` using - :py:class:`~jwst.resample.resample_spec.ResampleSpecData` - - - Resampled images are written out to disk if the ``save_intermediate_results`` - parameter is set to `True` - - **If resampling is turned off**, the original unrectified inputs are used to create - the median image for cosmic-ray detection - -* Create a median image from (possibly) resampled :py:class:`~jwst.datamodels.ModelContainer` - - - The median image is written out to disk if the ``save_intermediate_results`` - parameter is set to `True` - -* Blot median image to match each original input image - - - Resampled/blotted images are written out to disk if the ``save_intermediate_results`` - parameter is set to `True` - - **If resampling is turned off**, the median image is used for comparison - with the original input models for detecting outliers - -* Perform statistical comparison between blotted image and original image to identify outliers -* Update input data model DQ arrays with mask of detected outliers +#. Extract parameter settings from input model and merge them with any user-provided values + + - the same set of parameters available to: + ref:`Default Outlier Detection Algorithm ` + also applies to this code +#. Convert input data, as needed, to make sure it is in a format that can be processed + + - A :py:class:`~jwst.datamodels.ModelContainer` serves as the basic format + for all processing performed by + this step, as each entry will be treated as an element of a stack of images + to be processed to identify bad pixels, cosmic-rays and other artifacts + - If the input data is a :py:class:`~jwst.datamodels.CubeModel`, convert it into a + :py:class:`~jwst.datamodels.ModelContainer`. + This allows each plane of the cube to be treated as a separate 2D image + for resampling (if done) and for combining into a median image. +#. Resample all input images into a :py:class:`~jwst.datamodels.ModelContainer` using + :py:class:`~jwst.resample.resample_spec.ResampleSpecData` + + - Resampled images are written out to disk if the ``save_intermediate_results`` + parameter is set to `True` + - **If resampling is turned off**, the original unrectified inputs are used to create + the median image for cosmic-ray detection +#. Create a median image from (possibly) resampled :py:class:`~jwst.datamodels.ModelContainer` + + - The median image is written out to disk if the ``save_intermediate_results`` + parameter is set to `True` +#. 
Blot median image to match each original input image + + - Resampled/blotted images are written out to disk if the ``save_intermediate_results`` + parameter is set to `True` + - **If resampling is turned off**, the median image is used for comparison + with the original input models for detecting outliers +#. Perform statistical comparison between blotted image and original image to identify outliers +#. Update input data model DQ arrays with mask of detected outliers .. automodapi:: jwst.outlier_detection.outlier_detection_spec diff --git a/docs/jwst/outlier_detection/outlier_detection_step.rst b/docs/jwst/outlier_detection/outlier_detection_step.rst index b4e72f5fdc..3d3338f9ac 100644 --- a/docs/jwst/outlier_detection/outlier_detection_step.rst +++ b/docs/jwst/outlier_detection/outlier_detection_step.rst @@ -8,46 +8,46 @@ detection on JWST observations. The ``outlier_detection`` step supports multipl algorithms and determines the appropriate algorithm for the type of observation being processed. This step supports: -* **Image modes**: 'FGS_IMAGE', 'MIR_IMAGE', 'NRC_IMAGE', 'NIS_IMAGE' -* **Spectroscopic modes**: 'MIR_LRS-FIXEDSLIT', 'NRS_FIXEDSLIT', 'NRS_MSASPEC' -* **Time-Series-Observation(TSO) Spectroscopic modes**: 'MIR_LRS-SLITLESS', 'NRC_TSGRISM', 'NIS_SOSS', 'NRS_BRIGHTOBJ' -* **IFU Spectroscopic modes**: 'MIR_MRS', 'NRS_IFU' -* **TSO Image modes**: 'NRC_TSIMAGE' -* **Coronagraphic Image modes**: 'MIR_LYOT', 'MIR_4QPM', 'NRC_CORON' +#. **Image modes**: 'FGS_IMAGE', 'MIR_IMAGE', 'NRC_IMAGE', 'NIS_IMAGE' +#. **Spectroscopic modes**: 'MIR_LRS-FIXEDSLIT', 'NRS_FIXEDSLIT', 'NRS_MSASPEC' +#. **Time-Series-Observation(TSO) Spectroscopic modes**: 'MIR_LRS-SLITLESS', 'NRC_TSGRISM', 'NIS_SOSS', 'NRS_BRIGHTOBJ' +#. **IFU Spectroscopic modes**: 'MIR_MRS', 'NRS_IFU' +#. **TSO Image modes**: 'NRC_TSIMAGE' +#. **Coronagraphic Image modes**: 'MIR_LYOT', 'MIR_4QPM', 'NRC_CORON' This step uses the following logic to apply the appropriate algorithm to the input data: -* Interpret inputs (ASN table, ModelContainer or CubeModel) - to identify all input observations to be processed +#. Interpret inputs (ASN table, ModelContainer or CubeModel) + to identify all input observations to be processed -* Read in type of exposures in input by interpreting ``meta.exposure.type`` from inputs +#. Read in type of exposures in input by interpreting ``meta.exposure.type`` from inputs -* Read in parameters set by user +#. Read in parameters set by user -* Select outlier detection algorithm based on exposure type +#. 
Select outlier detection algorithm based on exposure type - - **Images**: like those taken with NIRCam, will use - :py:class:`~jwst.outlier_detection.outlier_detection.OutlierDetection` as described - in :ref:`outlier-detection-imaging` - - **Coronagraphic observations**: - use :py:class:`~jwst.outlier_detection.outlier_detection.OutlierDetection` with - resampling turned off as described in :ref:`outlier-detection-imaging` - - **Time-Series Observations(TSO)**: both imaging and spectroscopic modes, use - :py:class:`~jwst.outlier_detection.outlier_detection.OutlierDetection` with resampling - turned off as described in :ref:`outlier-detection-imaging` - - **IFU observations**: use - :py:class:`~jwst.outlier_detection.outlier_detection_ifu.OutlierDetectionIFU` as - described in :ref:`outlier-detection-ifu` - - **Long-slit spectroscopic observations**: use - :py:class:`~jwst.outlier_detection.outlier_detection_spec.OutlierDetectionSpec` as - described in :ref:`outlier-detection-spec` + - **Images**: like those taken with NIRCam, will use + :py:class:`~jwst.outlier_detection.outlier_detection.OutlierDetection` as described + in :ref:`outlier-detection-imaging` + - **Coronagraphic observations**: + use :py:class:`~jwst.outlier_detection.outlier_detection.OutlierDetection` with + resampling turned off as described in :ref:`outlier-detection-imaging` + - **Time-Series Observations(TSO)**: both imaging and spectroscopic modes, use + :py:class:`~jwst.outlier_detection.outlier_detection.OutlierDetection` with resampling + turned off as described in :ref:`outlier-detection-imaging` + - **IFU observations**: use + :py:class:`~jwst.outlier_detection.outlier_detection_ifu.OutlierDetectionIFU` as + described in :ref:`outlier-detection-ifu` + - **Long-slit spectroscopic observations**: use + :py:class:`~jwst.outlier_detection.outlier_detection_spec.OutlierDetectionSpec` as + described in :ref:`outlier-detection-spec` -* Instantiate and run outlier detection class determined for the exposure type - using parameter values interpreted from inputs. +#. Instantiate and run outlier detection class determined for the exposure type + using parameter values interpreted from inputs. -* Return input models with DQ arrays updated with flags for identified outliers +#. Return input models with DQ arrays updated with flags for identified outliers .. automodapi:: jwst.outlier_detection.outlier_detection_step diff --git a/docs/jwst/pipeline/calwebb_ami3.rst b/docs/jwst/pipeline/calwebb_ami3.rst index 5f65b07bc6..435490d8c0 100644 --- a/docs/jwst/pipeline/calwebb_ami3.rst +++ b/docs/jwst/pipeline/calwebb_ami3.rst @@ -25,12 +25,12 @@ The steps applied by the ``calwebb_ami3`` pipeline are shown below. When given an association file as input, which lists multiple science target and reference PSF exposures, the pipeline will: - - apply the :ref:`ami_analyze ` step to each input exposure +#. apply the :ref:`ami_analyze ` step to each input exposure independently, computing fringe parameters for each - - apply the :ref:`ami_average ` step to compute the average of the +#. apply the :ref:`ami_average ` step to compute the average of the :ref:`ami_analyze ` results for all of the science target exposures, and the average for all of the reference PSF results (if present) - - apply the :ref:`ami_normalize ` step to correct the average science +#. 
apply the :ref:`ami_normalize ` step to correct the average science target results using the average reference PSF results (if present) If no reference PSF target exposures are present in the input ASN file, the ``ami_normalize`` diff --git a/docs/jwst/ramp_fitting/arguments.rst b/docs/jwst/ramp_fitting/arguments.rst index e76d334088..c4e15b7793 100644 --- a/docs/jwst/ramp_fitting/arguments.rst +++ b/docs/jwst/ramp_fitting/arguments.rst @@ -16,12 +16,14 @@ The ramp fitting step has three optional arguments that can be set by the user: which will compute all values for the ramp the same as if the entire ramp were saturated. -* ``--maximum_cores``: The fraction of available cores that will be - used for multi-processing in this step. The default value is 'none' which does not use - multi-processing. The other options are 'quarter', 'half', and 'all'. Note that these - fractions refer to the total available cores and on most CPUs these include physical - and virtual cores. The clock time for the step is reduced - almost linearly by the number of physical cores used on all machines. For example, on an Intel CPU with - six real cores and 6 virtual cores setting maximum_cores to 'half' results in a - decrease of a factor of six in the clock time for the step to run. Depending on the system +* ``--maximum_cores``: The number of available cores that will be + used for multi-processing in this step. The default value is '1' which does not use + multi-processing. The other options are either an integer, 'quarter', 'half', and 'all'. + Note that these fractions refer to the total available cores and on most CPUs these include + physical and virtual cores. The clock time for the step is reduced almost linearly by the + number of physical cores used on all machines. For example, on an Intel CPU with + six real cores and six virtual cores, setting maximum_cores to 'half' results in a + decrease of a factor of six in the clock time for the step to run. Depending on the system, the clock time can also decrease even more with maximum_cores is set to 'all'. + Setting the number of cores to an integer can be useful when running on machines with a + large number of cores where the user is limited in how many cores they can use. diff --git a/docs/jwst/ramp_fitting/description.rst b/docs/jwst/ramp_fitting/description.rst index 5174cbe4a2..be91967ebd 100644 --- a/docs/jwst/ramp_fitting/description.rst +++ b/docs/jwst/ramp_fitting/description.rst @@ -10,11 +10,11 @@ is done using the "ordinary least squares" method. The fit is performed independently for each pixel. There can be up to three output files created by the step: - - The primary output file ("rate") contains slope and other results at +#. The primary output file ("rate") contains slope and other results at each pixel averaged over all integrations in the exposure. - - The secondary product ("rateints") contains slope and other results for +#. The secondary product ("rateints") contains slope and other results for each integration, stored as data cubes. - - A third, and optional, output product is also available, containing detailed +#. A third, and optional, output product is also available, containing detailed fit information for each ramp segment for each pixel. The three types of output products are described in more detail below. 
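+
+As a sketch of the ``maximum_cores`` multiprocessing option described in the
+arguments section above (the input file name is only a placeholder), an
+integer number of cores can now be requested when running the step from the
+command line, for example::
+
+    strun jwst.ramp_fitting.RampFitStep jw00001001001_01101_00001_nrs1_jump.fits \
+        --maximum_cores=4
+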
diff --git a/docs/jwst/references_general/distortion_reffile.rst b/docs/jwst/references_general/distortion_reffile.rst index caafbba7cc..124170ca8e 100644 --- a/docs/jwst/references_general/distortion_reffile.rst +++ b/docs/jwst/references_general/distortion_reffile.rst @@ -29,17 +29,17 @@ Reference File Format +++++++++++++++++++++ The distortion reference file contains a combination of astropy models, representing the transform from detector to the telescope V2, V3 system. -The following convention was adopted: - -- The output in the V2, V3 system is in units of arcsec. -- The input x and y are 0-based coordinates in the DMS system. -- The center of the first pixel is (0, 0), so the first pixel goes from -0.5 to 0.5. -- The origin of the transform is taken to be (0, 0). - Note, that while a different origin can be used for some transforms the relevant - offset should first be prepended to the distortion transform to account for the change - in origin of the coordinate frame. For instance, MIRI takes input in (0, 0) - indexed - detector pixel coordinates, but shifts these around prior to calling transforms that are - defined with respect to science-frame pixels that omit reference pixels. +The following conventions have been adopted: + +#. The output in the V2, V3 system is in units of arcsec. +#. The input x and y are 0-based coordinates in the DMS system. +#. The center of the first pixel is (0, 0), so the first pixel goes from -0.5 to 0.5. +#. The origin of the transform is taken to be (0, 0). + Note, that while a different origin can be used for some transforms the relevant + offset should first be prepended to the distortion transform to account for the change + in origin of the coordinate frame. For instance, MIRI takes input in (0,0)-indexed + detector pixel coordinates, but shifts these around prior to calling transforms that are + defined with respect to science-frame pixels that omit reference pixels. Internally the WCS pipeline works with 0-based coordinates. diff --git a/docs/jwst/references_general/emi_reffile.inc b/docs/jwst/references_general/emi_reffile.inc index 6073af125b..43bbab02bf 100644 --- a/docs/jwst/references_general/emi_reffile.inc +++ b/docs/jwst/references_general/emi_reffile.inc @@ -1,22 +1,22 @@ -.. _emi_reffile: +.. _emicorr_reffile: -EMI Reference File ------------------- +EMICORR Reference File +---------------------- -:REFTYPE: EMI +:REFTYPE: EMICORR :Data model: `~jwst.datamodels.EmiModel` -The EMI reference file contains data necessary for removing +The EMICORR reference file contains data necessary for removing contaminating MIRI EMI frequencies. .. include:: ../references_general/emi_selection.inc .. include:: ../includes/standard_keywords.inc -EMI Reference File Format -+++++++++++++++++++++++++ +EMICORR Reference File Format ++++++++++++++++++++++++++++++ -MIRI EMI reference files are in ASDF format. The EMI +MIRI EMICORR reference files are in ASDF format. The EMICORR reference file contains the frequencies for which the image will be corrected. Example file contents:: diff --git a/docs/jwst/references_general/explain_rmaps.rst b/docs/jwst/references_general/explain_rmaps.rst index bdbad93b25..6e15b84f7e 100644 --- a/docs/jwst/references_general/explain_rmaps.rst +++ b/docs/jwst/references_general/explain_rmaps.rst @@ -304,9 +304,9 @@ When CRDS adds a reference file to a .rmap, it uses literal matching between the value defined in the reference file and the existing values shown in the .rmap. 
This enables CRDS to: -* add files to existing categories -* replace files in existing categories -* create new categories of files. +#. add files to existing categories +#. replace files in existing categories +#. create new categories of files. Because creating new categories is an unusual event which should be carefully reviewed, CRDS issues a warning when a reference file defines a new category. diff --git a/docs/jwst/references_general/references_general.rst b/docs/jwst/references_general/references_general.rst index d2c5927f97..a007a39e87 100644 --- a/docs/jwst/references_general/references_general.rst +++ b/docs/jwst/references_general/references_general.rst @@ -94,9 +94,19 @@ documentation on each reference file. +-----------------------------------------------+--------------------------------------------------+ | :ref:`dq_init ` | :ref:`MASK ` | +-----------------------------------------------+--------------------------------------------------+ +| :ref:`emicorr ` | :ref:`EMICORR ` | ++-----------------------------------------------+--------------------------------------------------+ | :ref:`extract_1d ` | :ref:`EXTRACT1D ` | + +--------------------------------------------------+ | | :ref:`APCORR ` | ++ +--------------------------------------------------+ +| | SPECKERNEL (NIRISS SOSS ATOCA only) | ++ +--------------------------------------------------+ +| | SPECPROFILE (NIRISS SOSS ATOCA only) | ++ +--------------------------------------------------+ +| | SPECTRACE (NIRISS SOSS ATOCA only) | ++ +--------------------------------------------------+ +| | WAVEMAP (NIRISS SOSS ATOCA only) | +-----------------------------------------------+--------------------------------------------------+ | :ref:`extract_2d ` | :ref:`WAVECORR ` | + +--------------------------------------------------+ @@ -198,6 +208,8 @@ documentation on each reference file. +--------------------------------------------------+-----------------------------------------------+ | :ref:`DRIZPARS ` | :ref:`resample ` | +--------------------------------------------------+-----------------------------------------------+ +| :ref:`EMICORR ` | :ref:`emicorr ` | ++--------------------------------------------------+-----------------------------------------------+ | :ref:`EXTRACT1D ` | :ref:`extract_1d ` | +--------------------------------------------------+-----------------------------------------------+ | :ref:`FFLAT ` | :ref:`flatfield ` | @@ -454,7 +466,6 @@ S_BKDSUB Background subtraction S_COMB1D 1-D spectral combination S_DARK Dark subtraction S_DQINIT DQ initialization -S_ERRINI ERR initialization S_EXTR1D 1-D spectral extraction S_EXTR2D 2-D spectral extraction S_FLAT Flat field correction @@ -470,6 +481,7 @@ S_JUMP Jump detection S_KLIP Coronagraphic PSF subtraction S_LASTFR MIRI last frame correction S_LINEAR Linearity correction +S_MIREMI MIRI EMI correction S_MRSMAT MIRI MRS background matching S_MSAFLG NIRSpec MSA failed shutter flagging S_OUTLIR Outlier detection @@ -663,18 +675,18 @@ For example, all the following specifications are equivalent: `"12" == "4+8" == "4, 8" == "JUMP_DET, DROPOUT"` .. note:: - - The default value (0) will make *all* non-zero - pixels in the DQ mask be considered "bad" pixels and the - corresponding pixels will not be used in computations. - - - Setting to `None` will turn off the use of the DQ array - for computations. - - - In order to reverse the meaning of the flags - from indicating values of the "good" DQ flags - to indicating the "bad" DQ flags, prepend '~' to the string - value. 
For example, in order to exclude pixels with - DQ flags 4 and 8 for computations and to consider - as "good" all other pixels (regardless of their DQ flag), - use a value of ``~4+8``, or ``~4,8``. A string value of - ``~0`` would be equivalent to a setting of ``None``. + The default value (0) will make *all* non-zero + pixels in the DQ mask be considered "bad" pixels and the + corresponding pixels will not be used in computations. + + Setting to `None` will turn off the use of the DQ array + for computations. + + In order to reverse the meaning of the flags + from indicating values of the "good" DQ flags + to indicating the "bad" DQ flags, prepend '~' to the string + value. For example, in order to exclude pixels with + DQ flags 4 and 8 for computations and to consider + as "good" all other pixels (regardless of their DQ flag), + use a value of ``~4+8``, or ``~4,8``. A string value of + ``~0`` would be equivalent to a setting of ``None``. diff --git a/docs/jwst/resample/arguments.rst b/docs/jwst/resample/arguments.rst index ac597eacae..748d3071eb 100644 --- a/docs/jwst/resample/arguments.rst +++ b/docs/jwst/resample/arguments.rst @@ -115,3 +115,8 @@ image. For example, if set to ``0.5``, only resampled images that use less than half the available memory can be created. + +``--in_memory`` (boolean, default=True) + Specifies whether or not to load and create all images that are used during + processing into memory. If ``False``, input files are loaded from disk when + needed and all intermediate files are stored on disk, rather than in memory. diff --git a/docs/jwst/resample/main.rst b/docs/jwst/resample/main.rst index 62139055ee..e0008f0cd6 100644 --- a/docs/jwst/resample/main.rst +++ b/docs/jwst/resample/main.rst @@ -12,8 +12,8 @@ been incorporated into the image using the The ``resample`` step can take as input either: - * a single 2D input image - * an association table (in json format) +#. a single 2D input image +#. an association table (in json format) The defined parameters for the drizzle operation itself get provided by the DRIZPARS reference file (from CRDS). The exact values diff --git a/docs/jwst/residual_fringe/main.rst b/docs/jwst/residual_fringe/main.rst index 3d1048588b..e23f8b61b6 100644 --- a/docs/jwst/residual_fringe/main.rst +++ b/docs/jwst/residual_fringe/main.rst @@ -20,11 +20,11 @@ it is skipped by default. To apply this step set the step parameter, ``--skip = The ``residual_fringe`` step can accept several different forms of input data, including: - - a single file containing a 2-D IFU image +#. a single file containing a 2-D IFU image - - a data model (IFUImageModel) containing a 2-D IFU image +#. a data model (`~jwst.datamodels.IFUImageModel`) containing a 2-D IFU image - - an association table (in json format) containing a single input file +#. an association table (in json format) containing a single input file Assumptions diff --git a/docs/jwst/saturation/description.rst b/docs/jwst/saturation/description.rst index 33e52ab660..c5ea5193cc 100644 --- a/docs/jwst/saturation/description.rst +++ b/docs/jwst/saturation/description.rst @@ -60,26 +60,26 @@ handling in this step, due to the extra reference pixel values that are interlea within the science data. The saturation reference file data does not contain extra entries for these pixels. The step-by-step process is as follows: -- Retrieve and load data from the appropriate "SATURATION" reference file from CRDS +#. 
Retrieve and load data from the appropriate "SATURATION" reference file from CRDS -- If the input science exposure used the NIRSpec IRS2 readout pattern: +#. If the input science exposure used the NIRSpec IRS2 readout pattern: - * Create a temporary saturation array that is the same size as the IRS2 readout + * Create a temporary saturation array that is the same size as the IRS2 readout - * Copy the saturation threshold values from the original reference data into - the larger saturation array, skipping over the interleaved reference pixel - locations within the array + * Copy the saturation threshold values from the original reference data into + the larger saturation array, skipping over the interleaved reference pixel + locations within the array -- If the input science exposure used a subarray readout, extract the matching - subarray from the full-frame saturation reference file data +#. If the input science exposure used a subarray readout, extract the matching + subarray from the full-frame saturation reference file data -- For pixels that contain NaN in the reference file saturation threshold array - or are flagged in the reference file with "NO_SAT_CHECK" (no saturation check - available), propagate the "NO_SAT_CHECK" flag to the science data PIXELDQ array +#. For pixels that contain NaN in the reference file saturation threshold array + or are flagged in the reference file with "NO_SAT_CHECK" (no saturation check + available), propagate the "NO_SAT_CHECK" flag to the science data PIXELDQ array -- For each group in the input science data, set the "SATURATION" flag in the - "GROUPDQ" array if the pixel value is greater than or equal to the saturation - threshold from the reference file +#. For each group in the input science data, set the "SATURATION" flag in the + "GROUPDQ" array if the pixel value is greater than or equal to the saturation + threshold from the reference file NIRCam Frame 0 -------------- diff --git a/docs/jwst/skymatch/description.rst b/docs/jwst/skymatch/description.rst index 5527fdc666..8076a574bb 100644 --- a/docs/jwst/skymatch/description.rst +++ b/docs/jwst/skymatch/description.rst @@ -35,11 +35,14 @@ true total sky level. The step records information in three keywords that are included in the output files: -- BKGMETH: records the sky method that was used to compute sky levels +BKGMETH + records the sky method that was used to compute sky levels -- BKGLEVEL: the sky level computed for each image +BKGLEVEL + the sky level computed for each image -- BKGSUB: a boolean indicating whether or not the sky was subtracted from the +BKGSUB + a boolean indicating whether or not the sky was subtracted from the output images. Note that by default the step argument "subtract" is set to ``False``, which means that the sky will *NOT* be subtracted (see the :ref:`skymatch step arguments ` for more details). @@ -75,33 +78,33 @@ In addition to the classic "local" method, two other methods have been introduced: "global" and "match", as well as a combination of the two -- "global+match". -- The "global" method essentially uses the "local" method to first compute a - sky value for each image separately, and then assigns the minimum of those - results to all images in the collection. Hence after subtraction of the - sky values only one image will have a net sky of zero, while the remaining - images will have some small positive residual. 
- -- The "match" algorithm computes only a correction value for each image, such - that, when applied to each image, the mismatch between *all* pairs of images - is minimized, in the least-squares sense. For each pair of images, the sky - mismatch is computed *only* in the regions in which the two images overlap - on the sky. - - This makes the "match" algorithm particularly useful - for equalizing sky values in large mosaics in which one may have - only pair-wise intersection of adjacent images without having - a common intersection region (on the sky) in all images. - - Note that if the argument "match_down=True", matching will be done to the image - with the lowest sky value, and if "match_down=False" it will be done to the - image with the highest value - (see :ref:`skymatch step arguments ` for full details). - -- The "global+match" algorithm combines the "global" and "match" methods. - It uses the "global" algorithm to find a baseline sky value common to all - input images and the "match" algorithm to equalize sky values among images. - The direction of matching (to the lowest or highest) is again controlled by - the "match_down" argument. +#. The "global" method essentially uses the "local" method to first compute a + sky value for each image separately, and then assigns the minimum of those + results to all images in the collection. Hence after subtraction of the + sky values only one image will have a net sky of zero, while the remaining + images will have some small positive residual. + +#. The "match" algorithm computes only a correction value for each image, such + that, when applied to each image, the mismatch between *all* pairs of images + is minimized, in the least-squares sense. For each pair of images, the sky + mismatch is computed *only* in the regions in which the two images overlap + on the sky. + + This makes the "match" algorithm particularly useful + for equalizing sky values in large mosaics in which one may have + only pair-wise intersection of adjacent images without having + a common intersection region (on the sky) in all images. + + Note that if the argument "match_down=True", matching will be done to the image + with the lowest sky value, and if "match_down=False" it will be done to the + image with the highest value + (see :ref:`skymatch step arguments ` for full details). + +#. The "global+match" algorithm combines the "global" and "match" methods. + It uses the "global" algorithm to find a baseline sky value common to all + input images and the "match" algorithm to equalize sky values among images. + The direction of matching (to the lowest or highest) is again controlled by + the "match_down" argument. In the "local" and "global" methods, which find sky levels in each image, the calculation of the image statistics takes advantage of sigma clipping @@ -146,11 +149,15 @@ of course not be so exact. | 115 | 115 | 100 | 15 | 115 | +-------+-------+--------+-------+--------------+ -- "local" finds the sky level of each image independently of the rest. -- "global" uses the minimum sky level found by "local" and applies it to all images. -- "match" with "match_down=True" finds the offset needed to match all images +local + finds the sky level of each image independently of the rest. +global + uses the minimum sky level found by "local" and applies it to all images. +match + with "match_down=True" finds the offset needed to match all images to the level of the image with the lowest sky level. 
-- "global+match" with "match_down=True" finds the offsets and global value +global+match + with "match_down=True" finds the offsets and global value needed to set all images to a sky level of zero. In this trivial example, the results are identical to the "local" method. @@ -217,34 +224,34 @@ the computed "sky" may be the surface brightness of a large galaxy, nebula, etc. Here is a brief list of possible limitations and factors that can affect the outcome of the matching (sky subtraction in general) algorithm: -- Because sky computation is performed on *flat-fielded* but - *not distortion corrected* images, it is important to keep in mind - that flat-fielding is performed to obtain correct surface brightnesses. - Because the surface brightness of a pixel containing a point-like source will - change inversely with a change to the pixel area, it is advisable to - mask point-like sources through user-supplied mask files. Values - different from zero in user-supplied masks indicate good data pixels. - Alternatively, one can use the ``upper`` parameter to exclude the use of - pixels containing bright objects when performing the sky computations. - -- The input images may contain cosmic rays. This - algorithm does not perform CR cleaning. A possible way of minimizing - the effect of the cosmic rays on sky computations is to use - clipping (\ ``nclip`` > 0) and/or set the ``upper`` parameter to a value - larger than most of the sky background (or extended sources) but - lower than the values of most CR-affected pixels. - -- In general, clipping is a good way of eliminating bad pixels: - pixels affected by CR, hot/dead pixels, etc. However, for - images with complicated backgrounds (extended galaxies, nebulae, - etc.), affected by CR and noise, the clipping process may mask different - pixels in different images. If variations in the background are - too strong, clipping may converge to different sky values in - different images even when factoring in the true difference - in the sky background between the two images. - -- In general images can have different true background values - (we could measure it if images were not affected by large diffuse - sources). However, arguments such as ``lower`` and ``upper`` will - apply to all images regardless of the intrinsic differences - in sky levels (see :ref:`skymatch step arguments `). +#. Because sky computation is performed on *flat-fielded* but + *not distortion corrected* images, it is important to keep in mind + that flat-fielding is performed to obtain correct surface brightnesses. + Because the surface brightness of a pixel containing a point-like source will + change inversely with a change to the pixel area, it is advisable to + mask point-like sources through user-supplied mask files. Values + different from zero in user-supplied masks indicate good data pixels. + Alternatively, one can use the ``upper`` parameter to exclude the use of + pixels containing bright objects when performing the sky computations. + +#. The input images may contain cosmic rays. This + algorithm does not perform CR cleaning. A possible way of minimizing + the effect of the cosmic rays on sky computations is to use + clipping (\ ``nclip`` > 0) and/or set the ``upper`` parameter to a value + larger than most of the sky background (or extended sources) but + lower than the values of most CR-affected pixels. + +#. In general, clipping is a good way of eliminating bad pixels: + pixels affected by CR, hot/dead pixels, etc. 
However, for + images with complicated backgrounds (extended galaxies, nebulae, + etc.), affected by CR and noise, the clipping process may mask different + pixels in different images. If variations in the background are + too strong, clipping may converge to different sky values in + different images even when factoring in the true difference + in the sky background between the two images. + +#. In general images can have different true background values + (we could measure it if images were not affected by large diffuse + sources). However, arguments such as ``lower`` and ``upper`` will + apply to all images regardless of the intrinsic differences + in sky levels (see :ref:`skymatch step arguments `). diff --git a/docs/jwst/srctype/description.rst b/docs/jwst/srctype/description.rst index 835ba25839..d2aca0f8b8 100644 --- a/docs/jwst/srctype/description.rst +++ b/docs/jwst/srctype/description.rst @@ -37,7 +37,7 @@ is set to "UNKNOWN", the step determines a suitable value based on the observing mode, command line input, and other characteristics of the exposure. The following choices are used, in order of priority: - - The source type can be specified by the user on the command line. +#. The source type can be specified by the user on the command line. Exposure types for which this is permitted contain a single pre-defined target, i.e. MIR_LRS-FIXEDSLIT, MIR_LRS-SLITLESS, MIR_MRS,NRC_TSGRISM, NRS_FIXEDSLIT, NRS_BRIGHTOBJ, and NRS_IFU. Other EXP_TYPEs will be @@ -46,15 +46,15 @@ exposure. The following choices are used, in order of priority: will retain their default settings of "EXTENDED" (which is appropriate for sky background). - - Background target exposures default to a source type of "EXTENDED." +#. Background target exposures default to a source type of "EXTENDED." Background exposures are identified by the keyword "BKGDTARG" set to True. - - TSO exposures default to a source type of "POINT." TSO exposures are +#. TSO exposures default to a source type of "POINT." TSO exposures are identified by EXP_TYPE="NRC_TSGRISM" or "NRS_BRIGHTOBJ", or TSOVISIT=True. - - Exposures that are part of a nodded dither pattern, which are assumed +#. Exposures that are part of a nodded dither pattern, which are assumed to only be used with point-like targets, default to a source type of "POINT." Nodded exposures are usually identified by the "PATTTYPE" keyword either being set to a value of "POINT-SOURCE" or containing the @@ -63,7 +63,7 @@ exposure. The following choices are used, in order of priority: "PATTTYPE". If it has a value of "POINT-SOURCE", the source type is set to "POINT". - - If none of the above conditions apply, and the user did not choose a +#. If none of the above conditions apply, and the user did not choose a value in the APT, the following table of defaults is used, based on the "EXP_TYPE" keyword value: diff --git a/docs/jwst/stpipe/user_logging.rst b/docs/jwst/stpipe/user_logging.rst index 9ffae7cd99..9c42b5433c 100644 --- a/docs/jwst/stpipe/user_logging.rst +++ b/docs/jwst/stpipe/user_logging.rst @@ -8,11 +8,11 @@ Log messages are emitted from each Step at different levels of importance. The levels used are the standard ones for Python (from least important to most important: - - DEBUG - - INFO - - WARNING - - ERROR - - CRITICAL +#. DEBUG +#. INFO +#. WARNING +#. ERROR +#. CRITICAL By default, only messages of type WARNING or higher are displayed. This can be controlled by providing a logging configuration file. 
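The five levels listed above are the standard Python ``logging`` levels, so the default behavior can be pictured with a plain-``logging`` sketch that is only a generic illustration and is not stpipe-specific: with the default WARNING threshold, only the last three messages below are emitted::

    import logging

    # Generic illustration of the standard level hierarchy: with a WARNING
    # threshold, DEBUG and INFO messages are filtered out.
    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger("example")

    log.debug("hidden at the default level")
    log.info("hidden at the default level")
    log.warning("shown")
    log.error("shown")
    log.critical("shown")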
@@ -27,18 +27,18 @@ A logging configuration file is searched for in the following places. The first one found is used *in its entirety* and all others are ignored: - - The file specified with the ``--logcfg`` option to the - ``strun`` script. +#. The file specified with the ``--logcfg`` option to the + ``strun`` script. - - The file specified with the ``logcfg`` keyword to a - .call() execution of a Step or Pipeline. +#. The file specified with the ``logcfg`` keyword to a + .call() execution of a Step or Pipeline. - - A file called ``stpipe-log.cfg`` in the current working - directory. +#. A file called ``stpipe-log.cfg`` in the current working + directory. - - ``~/stpipe-log.cfg`` +#. ``~/stpipe-log.cfg`` - - ``/etc/stpipe-log.cfg`` +#. ``/etc/stpipe-log.cfg`` The logging configuration file is in the standard ini-file format. @@ -53,36 +53,36 @@ substeps of a step called ``MyStep``, call the section In each section, the following may be configured: - - ``level``: The level at and above which logging messages will be - displayed. May be one of (from least important to most - important): DEBUG, INFO, WARNING, ERROR or CRITICAL. +#. ``level``: The level at and above which logging messages will be + displayed. May be one of (from least important to most + important): DEBUG, INFO, WARNING, ERROR or CRITICAL. - - ``break_level``: The level at and above which logging messages - will cause an exception to be raised. For instance, if you - would rather stop execution at the first ERROR message (rather - than continue), set ``break_level`` to ``ERROR``. +#. ``break_level``: The level at and above which logging messages + will cause an exception to be raised. For instance, if you + would rather stop execution at the first ERROR message (rather + than continue), set ``break_level`` to ``ERROR``. - - ``handler``: Defines where log messages are to be sent. By - default, they are sent to stderr. However, one may also - specify: +#. ``handler``: Defines where log messages are to be sent. By + default, they are sent to stderr. However, one may also + specify: - - ``file:filename.log`` to send the log messages to the given - file. + - ``file:filename.log`` to send the log messages to the given + file. - - ``append:filename.log`` to append the log messages to the - given file. This is useful over ``file`` if multiple - processes may need to write to the same log file. + - ``append:filename.log`` to append the log messages to the + given file. This is useful over ``file`` if multiple + processes may need to write to the same log file. - - ``stdout`` to send log messages to stdout. + - ``stdout`` to send log messages to stdout. - Multiple handlers may be specified by putting the whole value in - quotes and separating the entries with a comma. + Multiple handlers may be specified by putting the whole value in + quotes and separating the entries with a comma. - - ``format``: Allows one to customize what each log message - contains. What this string may contain is described in the - `logging module LogRecord Attributes - `_ - section of the Python standard library. +#. ``format``: Allows one to customize what each log message + contains. What this string may contain is described in the + `logging module LogRecord Attributes + `_ + section of the Python standard library. 
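Taken together, these options might be combined in a configuration file along the lines of the following sketch; the ``my-log.cfg`` file name, the ``[*]`` catch-all section, and the chosen values are illustrative assumptions rather than part of this change::

    from pathlib import Path

    # Hedged sketch: raise the verbosity to DEBUG, stop on the first ERROR,
    # and append messages to a log file in addition to the default stderr.
    Path("my-log.cfg").write_text(
        "[*]\n"
        "level = DEBUG\n"
        "break_level = ERROR\n"
        "handler = append:pipeline.log\n"
    )

    # Equivalent usage with the options described above (hypothetical inputs):
    #   strun <step_or_pipeline> <input_file> --logcfg=my-log.cfg
    # or, from Python, via the documented ``logcfg`` keyword:
    #   SomeStep.call("<input_file>", logcfg="my-log.cfg")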
Examples ======== diff --git a/docs/jwst/user_documentation/parameters.rst b/docs/jwst/user_documentation/parameters.rst index 3a11a86c37..26dfe3eff1 100644 --- a/docs/jwst/user_documentation/parameters.rst +++ b/docs/jwst/user_documentation/parameters.rst @@ -53,11 +53,11 @@ output data model from one step to the input of the next step, without saving any intermediate results to disk. If you want to save the results from individual steps, you have two options: - - Specify ``save_results``. +#. Specify ``save_results``. This option will save the results of the step, using a filename created by the step. - - Specify a file name using ``output_file ``. +#. Specify a file name using ``output_file ``. This option will save the step results using the name specified. To do this using the Python pipeline interface, see .. _python_output_file:. To do diff --git a/jwst/lib/tests/test_engdb_mock.py b/jwst/lib/tests/test_engdb_mock.py index 11cb6e36b1..fe0e20bacb 100644 --- a/jwst/lib/tests/test_engdb_mock.py +++ b/jwst/lib/tests/test_engdb_mock.py @@ -85,6 +85,7 @@ def test_cache_data(db_cache, engdb): assert data == live_data +@pytest.mark.xfail(reason="may have been a change in EngDB coverage") def test_cache_partial_data(db_cache, engdb): """ Test read of some data. @@ -109,6 +110,7 @@ def test_cache_partial_data(db_cache, engdb): assert data_short == live_data_short +@pytest.mark.xfail(reason="may have been a change in EngDB coverage") def test_cache_end_data(db_cache, engdb): """ Test read of some data. diff --git a/jwst/outlier_detection/outlier_detection_scaled_step.py b/jwst/outlier_detection/outlier_detection_scaled_step.py index 2a6a91cfd0..62468ec679 100755 --- a/jwst/outlier_detection/outlier_detection_scaled_step.py +++ b/jwst/outlier_detection/outlier_detection_scaled_step.py @@ -28,7 +28,6 @@ class OutlierDetectionScaledStep(Step): nlow = integer(default=0) nhigh = integer(default=0) maskpt = float(default=0.7) - grow = integer(default=1) snr = string(default='4.0 3.0') scale = string(default='0.5 0.4') backg = float(default=0.0) @@ -59,7 +58,6 @@ def process(self, input): 'nlow': self.nlow, 'nhigh': self.nhigh, 'maskpt': self.maskpt, - 'grow': self.grow, 'snr': self.snr, 'scale': self.scale, 'backg': self.backg, diff --git a/jwst/outlier_detection/outlier_detection_stack_step.py b/jwst/outlier_detection/outlier_detection_stack_step.py index 930df84cdb..099cbac033 100644 --- a/jwst/outlier_detection/outlier_detection_stack_step.py +++ b/jwst/outlier_detection/outlier_detection_stack_step.py @@ -40,7 +40,6 @@ class OutlierDetectionStackStep(Step): nlow = integer(default=0) nhigh = integer(default=0) maskpt = float(default=0.7) - grow = integer(default=1) snr = string(default='4.0 3.0') scale = string(default='0.5 0.4') backg = float(default=0.0) @@ -74,7 +73,6 @@ def process(self, input): 'nlow': self.nlow, 'nhigh': self.nhigh, 'maskpt': self.maskpt, - 'grow': self.grow, 'snr': self.snr, 'scale': self.scale, 'backg': self.backg, diff --git a/jwst/outlier_detection/outlier_detection_step.py b/jwst/outlier_detection/outlier_detection_step.py index 741421c640..f0fa47768f 100644 --- a/jwst/outlier_detection/outlier_detection_step.py +++ b/jwst/outlier_detection/outlier_detection_step.py @@ -110,7 +110,6 @@ def process(self, input_data): 'nlow': self.nlow, 'nhigh': self.nhigh, 'maskpt': self.maskpt, - 'grow': self.grow, 'snr': self.snr, 'scale': self.scale, 'backg': self.backg, diff --git a/jwst/ramp_fitting/ramp_fit_step.py b/jwst/ramp_fitting/ramp_fit_step.py index a765aed29a..df7935d3d5 
100644 --- a/jwst/ramp_fitting/ramp_fit_step.py +++ b/jwst/ramp_fitting/ramp_fit_step.py @@ -391,7 +391,7 @@ class RampFitStep(Step): save_opt = boolean(default=False) # Save optional output opt_name = string(default='') suppress_one_group = boolean(default=True) # Suppress saturated ramps with good 0th group - maximum_cores = option('none', 'quarter', 'half', 'all', default='none') # max number of processes to create + maximum_cores = string(default='1') # cores for multiprocessing. Can be an integer, 'half', 'quarter', or 'all' """ # Prior to 04/26/17, the following were also in the spec above: diff --git a/jwst/ramp_fitting/tests/test_ramp_fit_step.py b/jwst/ramp_fitting/tests/test_ramp_fit_step.py index 7ce4d3e2b3..dc2ebc1662 100644 --- a/jwst/ramp_fitting/tests/test_ramp_fit_step.py +++ b/jwst/ramp_fitting/tests/test_ramp_fit_step.py @@ -14,6 +14,8 @@ JUMP = test_dq_flags["JUMP_DET"] SAT = test_dq_flags["SATURATED"] +MAXIMUM_CORES = ['2', 'none', 'quarter', 'half', 'all'] + @pytest.fixture(scope="module") def generate_miri_reffiles(): @@ -140,7 +142,8 @@ def setup_subarray_inputs( return model1, gdq, rnModel, pixdq, err, gain -def test_ramp_fit_step(generate_miri_reffiles, setup_inputs): +@pytest.mark.parametrize("max_cores", MAXIMUM_CORES) +def test_ramp_fit_step(generate_miri_reffiles, setup_inputs, max_cores): """ Create a simple input to instantiate RampFitStep and execute a call to test the step class and class method. @@ -166,7 +169,7 @@ def test_ramp_fit_step(generate_miri_reffiles, setup_inputs): # Call ramp fit through the step class slopes, cube_model = RampFitStep.call( model, override_gain=override_gain, override_readnoise=override_readnoise, - maximum_cores="none") + maximum_cores=max_cores) assert slopes is not None assert cube_model is not None diff --git a/requirements-sdp.txt b/requirements-sdp.txt index 924db0b8ae..ecd96851e3 100644 --- a/requirements-sdp.txt +++ b/requirements-sdp.txt @@ -23,8 +23,8 @@ asdf-transform-schemas==0.4.0 asdf-unit-schemas==0.1.0 asdf-wcs-schemas==0.3.0 astropy==6.0.0 -astropy-iers-data==0.2023.12.18.0.30.18 -attrs==23.1.0 +astropy-iers-data==0.2024.1.1.0.33.39 +attrs==23.2.0 Babel==2.14.0 BayesicFitting==3.2.0 certifi==2023.11.17 @@ -32,7 +32,7 @@ charset-normalizer==3.3.2 ci-watson==0.6.2 colorama==0.4.6 contourpy==1.2.0 -coverage==7.3.4 +coverage==7.4.0 crds==11.17.14 cycler==0.12.1 docutils==0.20.1 @@ -45,27 +45,27 @@ gwcs==0.20.0 idna==3.6 imageio==2.33.1 imagesize==1.4.1 -importlib-metadata==7.0.0 +importlib-metadata==7.0.1 iniconfig==2.0.0 Jinja2==3.1.2 jmespath==1.0.1 jplephem==2.21 jsonschema==4.20.0 -jsonschema-specifications==2023.11.2 +jsonschema-specifications==2023.12.1 kiwisolver==1.4.5 lazy_loader==0.3 -lxml==4.9.4 +lxml==5.0.0 MarkupSafe==2.1.3 matplotlib==3.8.2 networkx==3.2.1 -numpy==1.26.2 +numpy==1.26.3 numpydoc==1.6.0 -opencv-python-headless==4.8.1.78 +opencv-python-headless==4.9.0.80 openpyxl==3.1.2 packaging==23.2 Parsley==1.3 photutils==1.10.0 -Pillow==10.1.0 +pillow==10.2.0 pluggy==1.3.0 poppy==1.1.1 psutil==5.9.7 @@ -73,7 +73,7 @@ pyerfa==2.0.1.1 Pygments==2.17.2 pyparsing==3.1.1 pysiaf==0.21.0 -pytest==7.4.3 +pytest==7.4.4 pytest-cov==4.1.0 pytest-doctestplus==1.1.0 python-dateutil==2.8.2 @@ -82,8 +82,8 @@ readchar==4.0.5 referencing==0.32.0 requests==2.31.0 requests-mock==1.11.0 -rpds-py==0.15.2 -ruff==0.1.8 +rpds-py==0.16.2 +ruff==0.1.11 scikit-image==0.22.0 scipy==1.11.4 semantic-version==2.10.0
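Stepping back from the pinned requirements, the ``maximum_cores`` relaxation in ``ramp_fit_step.py`` above (from a fixed option list to a string that may also carry an integer) can be exercised as in this hedged sketch; the input file name is hypothetical and the looped values mirror the new test parametrization::

    from jwst.ramp_fitting.ramp_fit_step import RampFitStep

    # Hedged sketch: maximum_cores now accepts an integer count of processes
    # (as a string) in addition to the previously allowed named values.
    for cores in ("2", "none", "quarter", "half", "all"):
        slopes, cube_model = RampFitStep.call(
            "ramp_model.fits",   # hypothetical input exposure
            maximum_cores=cores,
        )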