From cf29a9002f546f0439a506e1ce618bf5f8c0ed62 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Thu, 29 Dec 2022 10:14:11 -0800 Subject: [PATCH 01/20] started implementing nirspec fit --- gsf/fitting.py | 8 ++++- gsf/maketmp_filt.py | 77 ++++++++++++++++++++++++++++++--------------- 2 files changed, 58 insertions(+), 27 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index cc54791..a07a69e 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -66,8 +66,14 @@ class Mainbody(): def __init__(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set:float=25.0, pixelscale:float=0.06, Lsun:float=3.839*1e33, cosmo=None, idman=None, zman=None, NRbb_lim=10000): + ''' + Parameters + ---------- + NRbb_lim : int + BB data is associated with ids greater than this number. + ''' self.update_input(inputs, idman=idman, zman=zman) - self.NRbb_lim = NRbb_lim # BB data is associated with ids greater than this number. + self.NRbb_lim = NRbb_lim def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set:float=25.0, pixelscale:float=0.06, Lsun:float=3.839*1e33, cosmo=None, diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 5ea3261..c1f3325 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -10,6 +10,7 @@ from astropy.io import fits,ascii from astropy.modeling.models import Moffat1D from astropy.convolution import convolve, convolve_fft +import astropy.units as u from .function import * from .function_igm import * @@ -189,14 +190,19 @@ def check_library(MB, af, nround=3): return flag -def get_LSF(inputs, DIR_EXTR, ID, lm, c=3e18): +def get_LSF(inputs, DIR_EXTR, ID, lm, wave_repr=4000, c=3e18, + sig_temp_def=50.): ''' Gets Morphology params, and returns LSF + + Parameters + ---------- + lm : float array + ''' Amp = 0 f_morp = False if inputs['MORP'] == 'moffat' or inputs['MORP'] == 'gauss': - f_morp = True try: mor_file = inputs['MORP_FILE'].replace('$ID','%s'%(ID)) fm = ascii.read(DIR_EXTR + mor_file) @@ -206,6 +212,7 @@ def get_LSF(inputs, DIR_EXTR, ID, lm, c=3e18): alp = fm['alp'] else: alp = 0 + f_morp = True except Exception: print('Error in reading morphology params.') print('No morphology convolution.') @@ -221,12 +228,16 @@ def get_LSF(inputs, DIR_EXTR, ID, lm, c=3e18): sig_temp = float(inputs['SIG_TEMP']) print('Template is set to %.1f km/s.'%(sig_temp)) except: - sig_temp = 50. + sig_temp = sig_temp_def print('Template resolution is unknown.') print('Set to %.1f km/s.'%(sig_temp)) - iixlam = np.argmin(np.abs(lm-4000)) # Around 4000 AA - dellam = lm[iixlam+1] - lm[iixlam] # AA/pix + # @@@ Below assumes a constant R over wavelength range; + iixlam = np.argmin(np.abs(lm-wave_repr)) # Around 4000 AA + if iixlam == len(lm)-1: + dellam = lm[iixlam] - lm[iixlam-1] # AA/pix + else: + dellam = lm[iixlam+1] - lm[iixlam] # AA/pix R_temp = c / (sig_temp*1e3*1e10) sig_temp_pix = np.median(lm) / R_temp / dellam # delta v in pixel; sig_inst = 0 #65 #km/s for Manga @@ -403,44 +414,60 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, # Get ascii data. 
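A quick numerical check of the LSF arithmetic touched in the get_LSF hunk above may help readers follow it. This is an illustration only, not part of the patch: the wavelength grid below is invented, while c, sig_temp, and the formulae are taken from the function.

    import numpy as np

    c = 3e18                                  # [AA/s], same constant get_LSF uses
    sig_temp = 50.                            # [km/s], the sig_temp_def fallback
    wave_repr = 4000                          # representative wavelength argument
    lm = np.arange(9000., 11000., 5.)         # hypothetical grid, 5 AA per pixel

    iixlam = np.argmin(np.abs(lm - wave_repr))
    if iixlam == len(lm) - 1:                 # edge case the patch adds a branch for
        dellam = lm[iixlam] - lm[iixlam-1]
    else:
        dellam = lm[iixlam+1] - lm[iixlam]    # 5 AA/pix on this grid

    R_temp = c / (sig_temp*1e3*1e10)          # ~6000
    sig_temp_pix = np.median(lm) / R_temp / dellam
    print(sig_temp_pix)                       # ~0.33 pixel for this grid
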
# MB.f_spec = False - try: - spec_files = inputs['SPEC_FILE'] - spec_files = [x.strip() for x in spec_files.split(',')] + if True:#try: + spec_files = [x.strip() for x in inputs['SPEC_FILE'].split(',')] ninp0 = np.zeros(len(spec_files), dtype='int') + + # THIS PART IS JUST TO GET THE TOTAL ARRAY NUMBER; for ff, spec_file in enumerate(spec_files): try: - fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#') - lm0tmp = fd0[:,0] - fobs0 = fd0[:,1] - eobs0 = fd0[:,2] - ninp0[ff] = len(lm0tmp) + if spec_file.split('.')[-1] == 'asdf': + id_asdf = int(spec_file.split('_')[2]) + fd0 = asdf.open(os.path.join(DIR_EXTR, spec_file)) + lm0tmp = fd0[id_asdf]['wavelength'].to(u.angstrom) + ninp0[ff] = len(lm0tmp) + else: + fd0 = np.loadtxt(os.path.joing(DIR_EXTR, spec_file), comments='#') + lm0tmp = fd0[:,0] + ninp0[ff] = len(lm0tmp) except Exception: print('File, %s/%s, cannot be open.'%(DIR_EXTR,spec_file)) pass - # Constructing arrays. + + # Then, Constructing arrays. lm = np.zeros(np.sum(ninp0[:]),dtype=float) fobs = np.zeros(np.sum(ninp0[:]),dtype=float) eobs = np.zeros(np.sum(ninp0[:]),dtype=float) fgrs = np.zeros(np.sum(ninp0[:]),dtype='int') # FLAG for each grism. for ff, spec_file in enumerate(spec_files): try: - fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#') - lm0tmp= fd0[:,0] - fobs0 = fd0[:,1] - eobs0 = fd0[:,2] + if spec_file.split('.')[-1] == 'asdf': + id_asdf = int(spec_file.split('_')[2]) + fd0 = asdf.open(os.path.join(DIR_EXTR, spec_file)) + lm0tmp = fd0[id_asdf]['wavelength'].to(u.angstrom).value + fobs0 = fd0[id_asdf]['flux'].value + eobs0 = np.sqrt(fd0[id_asdf]['fluxvar']).value + else: + fd0 = np.loadtxt(DIR_EXTR + spec_file, comments='#') + lm0tmp = fd0[:,0] + fobs0 = fd0[:,1] + eobs0 = fd0[:,2] + for ii1 in range(ninp0[ff]): if ff==0: ii = ii1 else: ii = ii1 + np.sum(ninp0[:ff]) fgrs[ii] = ff - lm[ii] = lm0tmp[ii1] + lm[ii] = lm0tmp[ii1] fobs[ii] = fobs0[ii1] eobs[ii] = eobs0[ii1] MB.f_spec = True + except Exception: + print('No spec data is registered.') pass - except: + else:#except: print('No spec file is provided.') pass @@ -989,7 +1016,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, else: if tt == 0: from astropy.modeling import models - from astropy import units as u print('Dust emission based on Modified Blackbody') ''' # from Eq.3 of Bianchi 13 @@ -1056,18 +1082,17 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, MB.af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf') ########################################## - # For observation. - # Write out for the Multi-component fitting. 
+ # For data; ########################################## if True: MB.data = {} file_tmp = 'tmp_library_%s.txt'%MB.ID file_tmp2 = 'tmp_library2%s.txt'%MB.ID - fw = open(file_tmp,'w')# + fw = open(file_tmp,'w') fw.write('# BB data (>%d) in this file are not used in fitting.\n'%(ncolbb)) for ii in range(len(lm)): g_offset = 1000 * fgrs[ii] - if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu: + if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu and not np.isnan(fobs[ii]): fw.write('%d %.5f %.5e %.5e\n'%(ii+g_offset, lm[ii], fobs[ii], eobs[ii])) else: fw.write('%d %.5f 0 1000\n'%(ii+g_offset, lm[ii])) @@ -1075,7 +1100,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, for ii in range(len(ltmpbb[0,:])): if SFILT[ii] in SKIPFILT:# data point to be skiped; fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], 0.0, fbb[ii])) - elif ebb[ii]>ebblim: + elif ebb[ii]>ebblim: fw.write('%d %.5f 0 1000\n'%(ii+ncolbb, ltmpbb[0,ii])) else: fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii])) From 5a4dc7fcb5a402b7cfc5d0de48c267206b513915 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Thu, 29 Dec 2022 16:29:42 -0800 Subject: [PATCH 02/20] cleaned --- gsf/fitting.py | 148 ++++++-------------------------------------- gsf/maketmp_filt.py | 27 ++++++-- gsf/zfit.py | 36 +++++------ 3 files changed, 55 insertions(+), 156 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index a07a69e..b38a19d 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -42,7 +42,7 @@ LW = [2800, 3347, 3727, 3799, 3836, 3869, 4102, 4341, 4861, 4960, 5008, 5175, 6563, 6717, 6731] fLW = np.zeros(len(LW), dtype='int') -NRbb_lim = 10000 # BB data is associated with ids greater than this number. +# NRbb_lim = 10000 # BB data is associated with ids greater than this number. class Mainbody(): ''' @@ -687,18 +687,20 @@ def read_data(self, Cz0:float, Cz1:float, Cz2:float, zgal:float, add_fir:bool=Fa # Spectrum ############## NR, x, fy00, ey00 = self.data['spec_obs']['NR'], self.data['spec_obs']['x'], self.data['spec_obs']['fy'], self.data['spec_obs']['ey'] - con0 = (NR<1000) - xx0 = x[con0] - fy0 = fy00[con0] * Cz0 - ey0 = ey00[con0] * Cz0 - con1 = (NR>=1000) & (NR<2000) - xx1 = x[con1] - fy1 = fy00[con1] * Cz1 - ey1 = ey00[con1] * Cz1 - con2 = (NR>=2000) & (NR=np.sum(data_len[:ii])) & (NR=1000) & (NR<10000) - fy1 = dict['fy'][con1] #* Cz1s - ey1 = dict['ey'][con1] #* Cz1s - x1 = dict['x'][con1] - con2 = (NR>=10000) # BB - fy2 = dict['fy'][con2] - ey2 = dict['ey'][con2] - x2 = dict['x'][con2] - - fy01 = np.append(fy0,fy1) - fcon = np.append(fy01,fy2) - ey01 = np.append(ey0,ey1) - eycon = np.append(ey01,ey2) - x01 = np.append(x0,x1) - xobs = np.append(x01,x2) - - wht = 1./np.square(eycon) - wht2 = wht - - # Set parameters; - fit_par_cz = Parameters() - for nn in range(len(fm_tmp[:,0])): - fit_par_cz.add('C%d'%nn, value=1., min=0., max=1e5) - - def residual_z(pars,z): - ''' - ''' - vals = pars.valuesdict() - xm_s = xm_tmp * (1+z) - fm_s = np.zeros(len(xm_tmp),'float') - - for nn in range(len(fm_tmp[:,0])): - fm_s += fm_tmp[nn,:] * pars['C%d'%nn] - - fint = interpolate.interp1d(xm_s, fm_s, kind='nearest', fill_value="extrapolate") - #fm_int = np.interp(xobs, xm_s, fm_s) - fm_int = fint(xobs) - - if fcon is None: - print('Data is none') - return fm_int - else: - return (fm_int - fcon) * np.sqrt(wht2) # i.e. 
residual/sigma - - # Start redshift search; - for zz in range(len(zspace)): - # Best fit - out_cz = minimize(residual_z, fit_par_cz, args=([zspace[zz]]), method=method) - keys = fit_report(out_cz).split('\n') - - csq = out_cz.chisqr - rcsq = out_cz.redchi - fitc_cz = [csq, rcsq] - - #return fitc_cz - chi2s[zz,0] = csq - chi2s[zz,1] = rcsq - - self.zspace = zspace - self.chi2s = chi2s - - return zspace, chi2s - - def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, zlimu=None, snlim=0, priors=None, f_bb_zfit=True, f_line_check=False, f_norm=True, f_lambda=False, zmax=20, include_bb=False, f_exclude_negative=False): @@ -946,7 +834,7 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, if include_bb: con_cz = ()#(sn>snlim) else: - con_cz = (self.dict['NR']snlim) + con_cz = (self.dict['NR']snlim) if f_exclude_negative: con_cz &= (sn>snlim) @@ -1028,7 +916,7 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, res_cz, fitc_cz = check_redshift( fy_cz, ey_cz, x_cz, fm_tmp, xm_tmp/(1+self.zgal), self.zgal, self.z_prior, self.p_prior, - NR_cz, zliml, zlimu, self.nmc_cz, self.nwalk_cz, + NR_cz, self.data['meta']['data_len'], zliml, zlimu, self.nmc_cz, self.nwalk_cz, include_photometry=include_bb ) diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index c1f3325..c496ed2 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -414,7 +414,9 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, # Get ascii data. # MB.f_spec = False - if True:#try: + data_meta = {'data_len':[],'data_origin':[],'data_index':[]} + + try: spec_files = [x.strip() for x in inputs['SPEC_FILE'].split(',')] ninp0 = np.zeros(len(spec_files), dtype='int') @@ -438,9 +440,9 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, lm = np.zeros(np.sum(ninp0[:]),dtype=float) fobs = np.zeros(np.sum(ninp0[:]),dtype=float) eobs = np.zeros(np.sum(ninp0[:]),dtype=float) - fgrs = np.zeros(np.sum(ninp0[:]),dtype='int') # FLAG for each grism. + fgrs = np.zeros(np.sum(ninp0[:]),dtype=int) # FLAG for each grism. for ff, spec_file in enumerate(spec_files): - try: + if True:#try: if spec_file.split('.')[-1] == 'asdf': id_asdf = int(spec_file.split('_')[2]) fd0 = asdf.open(os.path.join(DIR_EXTR, spec_file)) @@ -462,15 +464,25 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, lm[ii] = lm0tmp[ii1] fobs[ii] = fobs0[ii1] eobs[ii] = eobs0[ii1] + MB.f_spec = True + data_meta['data_len'] = np.append(data_meta['data_len'], len(lm0tmp)) + data_meta['data_origin'] = np.append(data_meta['data_origin'], '%s'%spec_file) + data_meta['data_index'] = np.append(data_meta['data_index'], '%d'%ff) - except Exception: + else:#except Exception: print('No spec data is registered.') pass - else:#except: + except: print('No spec file is provided.') pass + if ncolbb < np.sum(data_meta['data_len']): + print('ncolbb is updated') + ncolbb = np.sum(data_meta['data_len']) + MB.NRbb_lim = ncolbb + data_meta['data_len'] = np.asarray(data_meta['data_len']) + ############################# # READ BB photometry from CAT_BB: ############################# @@ -1086,12 +1098,15 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, ########################################## if True: MB.data = {} + MB.data['meta'] = data_meta + + # The following files are just temporary. 
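For readers tracking the data_meta bookkeeping introduced above, a small self-contained illustration may be useful. The segment lengths here are made up; only the selection pattern mirrors the one used by the patched read_data and residual_z.

    import numpy as np

    # Three input spectra were stacked; data_meta['data_len'] then records their lengths.
    data_len = np.array([120, 340, 80])
    NR = np.arange(np.sum(data_len))          # running index over the stacked points

    # Recover the points that belong to segment ii, as the patched code does:
    for ii in range(len(data_len)):
        if ii == 0:
            con = (NR < data_len[ii])
        else:
            con = (NR >= np.sum(data_len[:ii])) & (NR < np.sum(data_len[:ii+1]))
        assert con.sum() == data_len[ii]

    # Broad-band points are appended after the spectra with NR >= NRbb_lim, and the
    # patch raises NRbb_lim to sum(data_len) whenever the stacked spectra exceed the
    # default ncolbb of 10000 points.
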
file_tmp = 'tmp_library_%s.txt'%MB.ID file_tmp2 = 'tmp_library2%s.txt'%MB.ID fw = open(file_tmp,'w') fw.write('# BB data (>%d) in this file are not used in fitting.\n'%(ncolbb)) for ii in range(len(lm)): - g_offset = 1000 * fgrs[ii] + g_offset = 0 #1000 * fgrs[ii] if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu and not np.isnan(fobs[ii]): fw.write('%d %.5f %.5e %.5e\n'%(ii+g_offset, lm[ii], fobs[ii], eobs[ii])) else: diff --git a/gsf/zfit.py b/gsf/zfit.py index 3aec6aa..6e352c2 100644 --- a/gsf/zfit.py +++ b/gsf/zfit.py @@ -30,7 +30,7 @@ def lnprob_cz(pars, zprior, prior, zliml, zlimu, args, kwargs): return -0.5 * np.sum(resid) -def residual_z(pars, xm_tmp, fm_tmp, xobs, fobs, eobs, NR, NRbb_lim=10000, include_photometry=True, f_line_check=False): +def residual_z(pars, xm_tmp, fm_tmp, xobs, fobs, eobs, NR, data_len, NRbb_lim=10000, include_photometry=True, f_line_check=False): ''' ''' vals = pars.valuesdict() @@ -45,24 +45,20 @@ def residual_z(pars, xm_tmp, fm_tmp, xobs, fobs, eobs, NR, NRbb_lim=10000, inclu # fint = interpolate.interp1d(xm_s, fm_tmp, kind='linear', fill_value="extrapolate") fm_s = fint(xobs) - con0 = (NR<1000) - fy0 = fobs[con0] * Cz0s - ey0 = eobs[con0] * Cz0s - con1 = (NR>=1000) & (NR<2000) - fy1 = fobs[con1] * Cz1s - ey1 = eobs[con1] * Cz1s - con2 = (NR>=2000) & (NR=NRbb_lim) # BB - fy_bb = fobs[con_bb] - ey_bb = eobs[con_bb] - - fy01 = np.append(fy0,fy1) - ey01 = np.append(ey0,ey1) - fy02 = np.append(fy01,fy2) - ey02 = np.append(ey01,ey2) + Cs = [Cz0s, Cz1s, Cz2s] + fy02 = [] + ey02 = [] + for ii in range(len(data_len)): + if ii == 0: + con0 = (NR=np.sum(data_len[:ii])) & (NR=NRbb_lim) # BB + fy_bb = fobs[con_bb] + ey_bb = eobs[con_bb] if include_photometry and len(fy_bb)>0: fcon = np.append(fy02,fy_bb) eycon = np.append(ey02,ey_bb) @@ -87,7 +83,7 @@ def residual_z(pars, xm_tmp, fm_tmp, xobs, fobs, eobs, NR, NRbb_lim=10000, inclu return (fm_s - fcon) * np.sqrt(wht2) # i.e. residual/sigma -def check_redshift(fobs, eobs, xobs, fm_tmp, xm_tmp, zbest, zprior, prior, NR, zliml, zlimu, \ +def check_redshift(fobs, eobs, xobs, fm_tmp, xm_tmp, zbest, zprior, prior, NR, data_len, zliml, zlimu, \ nmc_cz=100, nwalk_cz=10, nthin=5, f_line_check=False, f_vary=True, NRbb_lim=10000, include_photometry=True): ''' Fit observed flux with a template to get redshift probability. @@ -137,7 +133,7 @@ def check_redshift(fobs, eobs, xobs, fm_tmp, xm_tmp, zbest, zprior, prior, NR, z fit_par_cz.add('Cz2', value=1, min=0.5, max=1.5) # Get Best fit - args_res = (xm_tmp, fm_tmp, xobs, fobs, eobs, NR) + args_res = (xm_tmp, fm_tmp, xobs, fobs, eobs, NR, data_len) kwargs_res = {'include_photometry':include_photometry, 'f_line_check':f_line_check, 'NRbb_lim':NRbb_lim} out_cz = minimize(residual_z, fit_par_cz, args=args_res, method='nelder') From 4b47bfd0bef518e08d70b0e3429a01abd1ae1414 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Thu, 29 Dec 2022 22:57:15 -0800 Subject: [PATCH 03/20] zfit plot --- gsf/fitting.py | 20 ++++++++++++-------- gsf/maketmp_filt.py | 2 +- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index b38a19d..ca3d30f 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -902,9 +902,17 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, data_model_sort = data_model[data_model[:, 0].argsort()] #data_model_sort = np.sort(data_model, axis=0) # This does not work!! - plt.plot(data_model_sort[:,0], data_model_sort[:,1], 'gray', linestyle='--', linewidth=0.5, label='') # Model based on input z. 
- plt.plot(data_model_sort[:,0], data_model_sort[:,2],'.b', linestyle='-', linewidth=0.5, label='Obs.') # Observation - plt.errorbar(data_model_sort[:,0], data_model_sort[:,2], yerr=data_model_sort[:,3], color='b', capsize=0, linewidth=0.5) # Observation + # Model based on input z. + plt.plot(data_model_sort[:,0], data_model_sort[:,1], 'gray', linestyle='--', linewidth=0.5, label='') + # Observation + # spec; + con = (self.dict['ey']<1000) & (self.dict['NR']=self.NRbb_lim) + plt.errorbar(self.dict['x'][con], self.dict['fy'][con], yerr=self.dict['ey'][con], ms=5, marker='o', + color='orange', capsize=0, linewidth=0.5, ls='None') # Write prob distribution; if True: @@ -940,7 +948,6 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, print('Recommended redshift, Cz0, Cz1, and Cz2, %.5f %.5f %.5f %.5f, with chi2/nu=%.3f'%(zrecom, Czrec0, Czrec1, Czrec2, fitc_cz[1])) print('\n\n') fit_label = 'Proposed model' - #self.fitc_cz = fitc_cz[1] else: print('fzvis is set to False. z fit not happening.') @@ -990,7 +997,6 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, data_model_new = np.zeros((len(x_cz),4),'float') data_model_new[:,0] = x_cz data_model_new[:,1] = fm_s - # data_model_new_sort = np.sort(data_model_new, axis=0) data_model_new_sort = data_model_new[data_model_new[:, 0].argsort()] plt.plot(data_model_new_sort[:,0], data_model_new_sort[:,1], 'r', linestyle='-', linewidth=0.5, label='%s ($z=%.5f$)'%(fit_label,zrecom)) # Model based on recomended z. @@ -1016,7 +1022,6 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, data_obsbb[:,0],data_obsbb[:,1] = self.dict['xbb'],self.dict['fybb'] if len(fm_tmp) == len(self.dict['xbb']): # BB only; data_obsbb[:,2] = fm_tmp - #data_obsbb_sort = np.sort(data_obsbb, axis=0) data_obsbb_sort = data_obsbb[data_obsbb[:, 0].argsort()] if len(fm_tmp) == len(self.dict['xbb']): # BB only; @@ -1024,7 +1029,6 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, else: model_spec = np.zeros((len(fm_tmp),2), 'float') model_spec[:,0],model_spec[:,1] = xm_tmp,fm_tmp - #model_spec_sort = np.sort(model_spec, axis=0) model_spec_sort = model_spec[model_spec[:, 0].argsort()] plt.plot(model_spec_sort[:,0], model_spec_sort[:,1], marker='.', color='gray', ms=1, linestyle='-', linewidth=0.5, zorder=4, label='Current model ($z=%.5f$)'%(self.zgal)) @@ -1475,7 +1479,7 @@ def get_shuffle(self, out, nshuf=3.0, amp=1e-4): def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, f_move:bool=False, verbose:bool=False, skip_fitz:bool=False, out=None, f_plot_accept:bool=True, f_shuffle:bool=True, amp_shuffle=1e-2, check_converge:bool=True, Zini=None, f_plot_chain:bool=True, - f_chind:bool=True, ncpu:int=0, f_prior_sfh:bool=False, norder_sfh_prior:int=3, include_bb=True): + f_chind:bool=True, ncpu:int=0, f_prior_sfh:bool=False, norder_sfh_prior:int=3, include_bb=False): ''' Main module of this script. diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index c496ed2..49187ba 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -896,7 +896,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, # Register filter response; ltmpbb[ss,:], ftmpbb[ss,:] = filconv(SFILT, wavetmp, spec_mul_nu[ss,:], DIR_FILT, MB=MB, f_regist=False) - ########################################## # Writing out the templates to fits table. 
########################################## @@ -1139,6 +1138,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, else: fw.write('%d %.5f %.5e %.5e\n'%(ii+ncolbb+nbblast, ltmpbb_d[ii+nbblast], fbb_d[ii], ebb_d[ii])) fw.close() + if MB.f_dust: dat = ascii.read(file_tmp, format='no_header') nr_d = dat['col1'] From 424f30321dab9a25badd25be248e99706fc31a85 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Fri, 30 Dec 2022 17:19:55 -0800 Subject: [PATCH 04/20] some tweak to nspec --- gsf/fitting.py | 2 +- gsf/gsf.py | 10 ++++++++++ gsf/maketmp_filt.py | 5 +++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index ca3d30f..05edea1 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -1479,7 +1479,7 @@ def get_shuffle(self, out, nshuf=3.0, amp=1e-4): def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, f_move:bool=False, verbose:bool=False, skip_fitz:bool=False, out=None, f_plot_accept:bool=True, f_shuffle:bool=True, amp_shuffle=1e-2, check_converge:bool=True, Zini=None, f_plot_chain:bool=True, - f_chind:bool=True, ncpu:int=0, f_prior_sfh:bool=False, norder_sfh_prior:int=3, include_bb=False): + f_chind:bool=True, ncpu:int=0, f_prior_sfh:bool=False, norder_sfh_prior:int=3, include_bb=True): ''' Main module of this script. diff --git a/gsf/gsf.py b/gsf/gsf.py index 6a68a92..3b727f8 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -271,6 +271,16 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No ''' if fplt == 6: + # Use the final redshift; + from astropy.io import fits + hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header + MB.zgal = hd_sum['ZMC'] + + if MB.SFH_FORM == -99: + flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + else: + flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + if MB.SFH_FORM == -99: from .plot_sed import plot_corner_physparam_frame,plot_corner_physparam_summary plot_corner_physparam_summary(MB) diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 49187ba..ffd8904 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -414,7 +414,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, # Get ascii data. 
# MB.f_spec = False - data_meta = {'data_len':[],'data_origin':[],'data_index':[]} + data_meta = {'data_len':np.zeros(3,int),'data_origin':[],'data_index':[]} try: spec_files = [x.strip() for x in inputs['SPEC_FILE'].split(',')] @@ -466,7 +466,8 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, eobs[ii] = eobs0[ii1] MB.f_spec = True - data_meta['data_len'] = np.append(data_meta['data_len'], len(lm0tmp)) + # data_meta['data_len'] = np.append(data_meta['data_len'], len(lm0tmp)) + data_meta['data_len'][ff] = len(lm0tmp) data_meta['data_origin'] = np.append(data_meta['data_origin'], '%s'%spec_file) data_meta['data_index'] = np.append(data_meta['data_index'], '%d'%ff) From 903a44a7bb79fd14de76c78c7e1c099ac4a4ed3f Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Fri, 30 Dec 2022 17:20:14 -0800 Subject: [PATCH 05/20] Update plot_sed.py --- gsf/plot_sed.py | 419 ++++++------------------------------------------ 1 file changed, 49 insertions(+), 370 deletions(-) diff --git a/gsf/plot_sed.py b/gsf/plot_sed.py index dcfb10f..8e53c83 100644 --- a/gsf/plot_sed.py +++ b/gsf/plot_sed.py @@ -204,20 +204,21 @@ def gaus(x,a,x0,sigma): else: MB.dict = MB.read_data(Cz0, Cz1, Cz2, zbes) - NR = MB.dict['NR'] - x = MB.dict['x'] - fy = MB.dict['fy'] - ey = MB.dict['ey'] - - con0 = (NR<1000) + NR = MB.dict['NR'] + x = MB.dict['x'] + fy = MB.dict['fy'] + ey = MB.dict['ey'] + data_len = MB.data['meta']['data_len'] + + con0 = (NR=1000) & (NR<2000) + con1 = (NR>=data_len[0]) & (NR=1000) & (NR=data_len[1]) & (NRSNlim) + con0 = (NR=1000) & (NR<2000) #& (fy/ey>SNlim) + fg0 = fy[con0] + eg0 = ey[con0] + con1 = (NR>=data_len[0]) & (NR=2000) & (NRSNlim) + fg1 = fy[con1] + eg1 = ey[con1] + con2 = (NR>=data_len[1]) & (NR=NRbb_lim) - xg_bb = x[con_bb] - fg_bb = fy00[con_bb] - eg_bb = ey00[con_bb] + fg2 = fy[con2] + eg2 = ey[con2] + + con_bb = (NR>=MB.NRbb_lim)#& (fy/ey>SNlim) + xg_bb = x[con_bb] + fg_bb = fy[con_bb] + eg_bb = ey[con_bb] fy01 = np.append(fg0,fg1) ey01 = np.append(eg0,eg1) @@ -2909,7 +2909,6 @@ def plot_corner_physparam_summary(MB, fig=None, out_ind=0, DIR_OUT='./', mmax=10 # This is because fsps has the minimum tau = tau_lim delT[aa] = tau_lim - delT[:] *= 1e9 # Gyr to yr delTl[:] *= 1e9 # Gyr to yr delTu[:] *= 1e9 # Gyr to yr @@ -3109,6 +3108,7 @@ def density_estimation(m1, m2): else: NPAR = [lmtmp[:kk+1], Ttmp[:kk+1], Avtmp[:kk+1], Ztmp[:kk+1]] + # This should happen at the last kk; if kk == mmax-1: # Histogram for i, x in enumerate(Par): @@ -3125,9 +3125,10 @@ def density_estimation(m1, m2): ax.plot(yy*0+np.percentile(NPAR[i],84), yy, linestyle='--', color='gray', lw=1) ax.plot(yy*0+np.percentile(NPAR[i],50), yy, linestyle='-', color='gray', lw=1) - ax.text(np.percentile(NPAR[i],16), np.max(yy)*1.02, '%.2f'%(np.percentile(NPAR[i],16)), fontsize=9) - ax.text(np.percentile(NPAR[i],50), np.max(yy)*1.02, '%.2f'%(np.percentile(NPAR[i],50)), fontsize=9) - ax.text(np.percentile(NPAR[i],84), np.max(yy)*1.02, '%.2f'%(np.percentile(NPAR[i],84)), fontsize=9) + # percentile values? 
+ # ax.text(np.percentile(NPAR[i],16), np.max(yy)*1.02, '%.2f'%(np.percentile(NPAR[i],16)), fontsize=9) + # ax.text(np.percentile(NPAR[i],50), np.max(yy)*1.02, '%.2f'%(np.percentile(NPAR[i],50)), fontsize=9) + # ax.text(np.percentile(NPAR[i],84), np.max(yy)*1.02, '%.2f'%(np.percentile(NPAR[i],84)), fontsize=9) except: print('Failed at i,x=',i,x) @@ -3138,6 +3139,17 @@ def density_estimation(m1, m2): if i < K-1: ax.set_xticklabels([]) + # save pck; + if save_pcl: + if MB.fzmc == 1: + NPAR_LIB = {'logM_stel':lmtmp[:kk+1], 'logT_MW':Ttmp[:kk+1], 'AV':Avtmp[:kk+1], 'logZ_MW':Ztmp[:kk+1], 'z':redshifttmp[:kk+1]} + else: + NPAR_LIB = {'logM_stel':lmtmp[:kk+1], 'logT_MW':Ttmp[:kk+1], 'AV':Avtmp[:kk+1], 'logZ_MW':Ztmp[:kk+1]} + cpklname = os.path.join(MB.DIR_OUT, 'chain_' + MB.ID + '_phys.cpkl') + savecpkl({'chain':NPAR_LIB, + 'burnin':burnin, 'nwalkers':nwalk,'niter':nmc,'ndim':ndim}, + cpklname) # Already burn in + # Scatter and contour for i, x in enumerate(Par): for j, y in enumerate(Par): @@ -3185,9 +3197,8 @@ def density_estimation(m1, m2): if i < K-1: ax.set_xticklabels([]) - if kk%10 == 0 and out_ind == 1: - fname = MB.DIR_OUT + + '%d.png' % kk + fname = os.path.join(MB.DIR_OUT, '%d.png' % kk) print('Saving frame', fname) plt.savefig(fname, dpi=200) files.append(fname) @@ -3252,338 +3263,6 @@ def density_estimation(m1, m2): plt.savefig(MB.DIR_OUT + 'param_' + ID + '_corner.png', dpi=150) -def plot_corner_physparam_cumulative_frame(ID, Zall=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1.0, 3.0], \ - tau0=[0.1,0.2,0.3], fig=None, dust_model=0, out_ind=0, snlimbb=1.0, DIR_OUT='./', NRbb_lim=10000): - ''' - Creat "cumulative" png for gif image. - - If you like to creat temporal png for gif image. - - Parameters - ---------- - snlimbb : float - SN limit to show flux or up lim in SED. - ''' - col = ['violet', 'indigo', 'b', 'lightblue', 'lightgreen', 'g', 'orange', 'coral', 'r', 'darkred']#, 'k'] - nage = np.arange(0,len(age),1) - fnc = Func(ID, PA, Zall, age, dust_model=dust_model) # Set up the number of Age/ZZ - bfnc = Basic(Zall) - - ########################### - # Open result file - ########################### - # Open ascii file and stock to array. - lib = fnc.open_spec_fits(ID, PA, fall=0) - lib_all = fnc.open_spec_fits(ID, PA, fall=1) - - file = 'summary_' + ID + '.fits' - hdul = fits.open(file) # open a FITS file - - # Redshift MC - zp50 = hdul[1].data['zmc'][1] - zp16 = hdul[1].data['zmc'][0] - zp84 = hdul[1].data['zmc'][2] - - M50 = hdul[1].data['ms'][1] - M16 = hdul[1].data['ms'][0] - M84 = hdul[1].data['ms'][2] - print('Total stellar mass is %.2e'%(M50)) - - A50 = np.zeros(len(age), dtype='float') - A16 = np.zeros(len(age), dtype='float') - A84 = np.zeros(len(age), dtype='float') - for aa in range(len(age)): - A50[aa] = hdul[1].data['A'+str(aa)][1] - A16[aa] = hdul[1].data['A'+str(aa)][0] - A84[aa] = hdul[1].data['A'+str(aa)][2] - - Asum = np.sum(A50) - aa = 0 - Av50 = hdul[1].data['Av'+str(aa)][1] - Av16 = hdul[1].data['Av'+str(aa)][0] - Av84 = hdul[1].data['Av'+str(aa)][2] - Z50 = np.zeros(len(age), dtype='float') - Z16 = np.zeros(len(age), dtype='float') - Z84 = np.zeros(len(age), dtype='float') - NZbest = np.zeros(len(age), dtype='int') - for aa in range(len(age)): - Z50[aa] = hdul[1].data['Z'+str(aa)][1] - Z16[aa] = hdul[1].data['Z'+str(aa)][0] - Z84[aa] = hdul[1].data['Z'+str(aa)][2] - NZbest[aa]= bfnc.Z2NZ(Z50[aa]) - - - ZZ50 = np.sum(Z50*A50)/np.sum(A50) # Light weighted Z. 
- chi = hdul[1].data['chi'][0] - chin = hdul[1].data['chi'][1] - fitc = chin - Cz0 = hdul[0].header['Cz0'] - Cz1 = hdul[0].header['Cz1'] - Cz2 = hdul[0].header['Cz2'] - zbes = hdul[0].header['z'] - zscl = (1.+zbes) - - # Repeat no. - nplot = 1000 - #DIR_OUT = '/astro/udfcen3/Takahiro/sedfitter/corner/' + ID + '_corner/' - try: - os.makedirs(DIR_OUT) - except: - pass - - # plot Configuration - K = 4 # No of params. - Par = ['$\log M_*/M_\odot$', '$\log T$/Gyr', '$A_V$/mag', '$\log Z / Z_\odot$'] - factor = 2.0 # size of one side of one panel - lbdim = 0.5 * factor # size of left/bottom margin - trdim = 0.2 * factor # size of top/right margin - whspace = 0.02 # w/hspace size - plotdim = factor * K + factor * (K - 1.) * whspace - dim = lbdim + plotdim + trdim - sclfig = 0.7 - - # Create a new figure if one wasn't provided. - if fig is None: - fig, axes = plt.subplots(K, K, figsize=(dim*sclfig, dim*sclfig)) - else: - try: - axes = np.array(fig.axes).reshape((K, K)) - except: - raise ValueError("Provided figure has {0} axes, but data has " - "dimensions K={1}".format(len(fig.axes), K)) - # Format the figure. - lb = lbdim / dim - tr = (lbdim + plotdim) / dim - fig.subplots_adjust(left=lb* 1.06, bottom=lb*.9, right=tr, top=tr*.99, - wspace=whspace, hspace=whspace) - - # For spec plot - ax0 = fig.add_axes([0.62,0.61,0.37,0.33]) - - ############################### - # Data taken from - ############################### - DIR_TMP = './templates/' - dat = np.loadtxt(DIR_TMP + 'spec_obs_' + ID + '.cat', comments='#') - NR = dat[:, 0] - x = dat[:, 1] - fy00 = dat[:, 2] - ey00 = dat[:, 3] - - con0 = (NR<1000) #& (fy/ey>SNlim) - xg0 = x[con0] - fg0 = fy00[con0] * Cz0 - eg0 = ey00[con0] * Cz0 - con1 = (NR>=1000) & (NR<2000) #& (fy/ey>SNlim) - xg1 = x[con1] - fg1 = fy00[con1] * Cz1 - eg1 = ey00[con1] * Cz1 - con2 = (NR>=2000) & (NRSNlim) - xg2 = x[con2] - fg2 = fy00[con2] * Cz2 - eg2 = ey00[con2] * Cz2 - - con_bb = (NR>=NRbb_lim)#& (fy/ey>SNlim) - xg_bb = x[con_bb] - fg_bb = fy00[con_bb] - eg_bb = ey00[con_bb] - - fy01 = np.append(fg0,fg1) - ey01 = np.append(eg0,eg1) - fy02 = np.append(fy01,fg2) - ey02 = np.append(ey01,eg2) - - fy = np.append(fy02,fg_bb) - ey = np.append(ey02,eg_bb) - wht=1./np.square(ey) - - dat = np.loadtxt(DIR_TMP + 'bb_obs_' + ID + '.cat', comments='#') - NRbb = dat[:,0] - xbb = dat[:,1] - fybb = dat[:,2] - eybb = dat[:,3] - exbb = dat[:,4] - snbb = fybb/eybb - - conspec = (NR1) - #ax0.plot(xg0, fg0 * c / np.square(xg0) / d, marker='', linestyle='-', linewidth=0.5, ms=0.1, color='royalblue', label='') - #ax0.plot(xg1, fg1 * c / np.square(xg1) / d, marker='', linestyle='-', linewidth=0.5, ms=0.1, color='#DF4E00', label='') - conbb = (fybb/eybb>snlimbb) - ax0.errorbar(xbb[conbb], fybb[conbb] * c / np.square(xbb[conbb]) / d, yerr=eybb[conbb]*c/np.square(xbb[conbb])/d, color='k', linestyle='', linewidth=0.5, zorder=4) - ax0.plot(xbb[conbb], fybb[conbb] * c / np.square(xbb[conbb]) / d, '.r', ms=10, linestyle='', linewidth=0, zorder=4) - - conbbe = (fybb/eybb NO keys of ndim and burnin found in cpkl, use input keyword values') - - f0 = fits.open(DIR_TMP + 'ms_' + ID + '.fits') - sedpar = f0[1] - - import matplotlib - import matplotlib.cm as cm - getcmap = matplotlib.cm.get_cmap('jet') - nc = np.arange(0, nmc, 1) - col = getcmap((nc-0)/(nmc-0)) - - #for kk in range(0,nmc,1): - Ntmp = np.zeros(nplot, dtype='float') - lmtmp= np.zeros(nplot, dtype='float') - Avtmp= np.zeros(nplot, dtype='float') - Ztmp = np.zeros(nplot, dtype='float') - Ttmp = np.zeros(nplot, dtype='float') - ACtmp= 
np.zeros(nplot, dtype='float') - - files = [] # For movie - for kk in range(0,nplot,1): - - #nr = kk # np.random.randint(len(samples)) - nr = np.random.randint(len(samples)) - Avtmp[kk] = samples['Av'][nr] - #Asum = 0 - #for ss in range(len(age)): - #Asum += np.sum(samples['A'+str(ss)][nr]) - II0 = nage #[0,1,2,3] # Number for templates - for ss in range(len(age)): - ii = int(len(II0) - ss - 1) # from old to young templates. - AA_tmp = samples['A'+str(ii)][nr] - try: - ZZ_tmp = samples['Z'+str(ii)][nr] - except: - ZZ_tmp = samples['Z0'][nr] - nZtmp = bfnc.Z2NZ(ZZ_tmp) - mslist = sedpar.data['ML_'+str(nZtmp)][ii] - lmtmp[kk] += AA_tmp * mslist - Ztmp[kk] += (10 ** ZZ_tmp) * AA_tmp * mslist - Ttmp[kk] += age[ii] * AA_tmp * mslist - ACtmp[kk] += AA_tmp * mslist - - # SED - flim = 0.05 - if ss == 0: - y0, x0 = fnc.get_template_single(AA_tmp, Avtmp[kk], ii, ZZ_tmp, zbes, lib_all, tau0=tau0) - y0p, x0p = fnc.get_template_single(AA_tmp, Avtmp[kk], ii, ZZ_tmp, zbes, lib, tau0=tau0) - ysump = y0p #* 1e18 - ysum = y0 #* 1e18 - if AA_tmp/Asum > flim: - ax0.plot(x0, y0 * c/ np.square(x0) / d, '--', lw=0.1, color=col[ii], zorder=-1, label='', alpha=0.1) - else: - y0_r, x0_tmp = fnc.get_template_single(AA_tmp, Avtmp[kk], ii, ZZ_tmp, zbes, lib_all, tau0=tau0) - y0p, x0p = fnc.get_template_single(AA_tmp, Avtmp[kk], ii, ZZ_tmp, zbes, lib, tau0=tau0) - ysump += y0p #* 1e18 - ysum += y0_r #* 1e18 - if AA_tmp/Asum > flim: - ax0.plot(x0, y0_r * c/ np.square(x0) / d, '--', lw=0.1, color=col[ii], zorder=-1, label='', alpha=0.1) - # Total - ax0.plot(x0, ysum * c/ np.square(x0) / d, '-', lw=0.1, color='gray', zorder=-1, label='', alpha=0.1) - ax0.set_xlim(2200, 88000) - ax0.set_xscale('log') - ax0.set_ylim(0., ymax) - - # Convert into log - Ztmp[kk] /= ACtmp[kk] - Ttmp[kk] /= ACtmp[kk] - Ntmp[kk] = kk - - lmtmp[kk] = np.log10(lmtmp[kk]) - Ztmp[kk] = np.log10(Ztmp[kk]) - Ttmp[kk] = np.log10(Ttmp[kk]) - - - NPAR = [lmtmp[:kk+1], Ttmp[:kk+1], Avtmp[:kk+1], Ztmp[:kk+1]] - NPARmin = [np.log10(M16)-0.1, -0.4, Av16-0.1, -0.5] - NPARmax = [np.log10(M84)+0.1, 0.5, Av84+0.1, 0.5] - - #for kk in range(0,nplot,1): - if kk == nplot-1: - # Histogram - for i, x in enumerate(Par): - ax = axes[i, i] - x1min, x1max = NPARmin[i], NPARmax[i] - nbin = 50 - binwidth1 = (x1max-x1min)/nbin - bins1 = np.arange(x1min, x1max + binwidth1, binwidth1) - ax.hist(NPAR[i], bins=bins1, orientation='vertical', color='b', histtype='stepfilled', alpha=0.6) - ax.set_xlim(x1min, x1max) - ax.set_yticklabels([]) - if i == K-1: - ax.set_xlabel('%s'%(Par[i]), fontsize=12) - if i < K-1: - ax.set_xticklabels([]) - - # Scatter and contour - for i, x in enumerate(Par): - for j, y in enumerate(Par): - if i > j: - ax = axes[i, j] - ax.scatter(NPAR[j], NPAR[i], c='b', s=1, marker='o', alpha=0.01) - ax.set_xlabel('%s'%(Par[j]), fontsize=12) - - x1min, x1max = NPARmin[j], NPARmax[j] - y1min, y1max = NPARmin[i], NPARmax[i] - ax.set_xlim(x1min, x1max) - ax.set_ylim(y1min, y1max) - - if j==0: - ax.set_ylabel('%s'%(Par[i]), fontsize=12) - if j>0: - ax.set_yticklabels([]) - if i Date: Fri, 30 Dec 2022 17:43:22 -0800 Subject: [PATCH 06/20] Update plot_sed.py --- gsf/plot_sed.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/gsf/plot_sed.py b/gsf/plot_sed.py index 8e53c83..4bd4f5e 100644 --- a/gsf/plot_sed.py +++ b/gsf/plot_sed.py @@ -214,14 +214,16 @@ def gaus(x,a,x0,sigma): xg0 = x[con0] fg0 = fy[con0] eg0 = ey[con0] - con1 = (NR>=data_len[0]) & (NR=data_len[0]) & (NR=data_len[1]) & (NR=data_len[1]+data_len[0]) & (NR0 or 
len(xg1)>0 or len(xg2)>0: f_grsm = True @@ -610,10 +612,9 @@ def gaus(x,a,x0,sigma): # Zoom in Line regions ########################## if f_grsm: - conspec = (NR<10000) #& (fy/ey>1) ax2t.errorbar(xg2, fg2 * c/np.square(xg2)/d, yerr=eg2 * c/np.square(xg2)/d, lw=0.5, color='#DF4E00', zorder=10, alpha=1., label='', capsize=0) ax2t.errorbar(xg1, fg1 * c/np.square(xg1)/d, yerr=eg1 * c/np.square(xg1)/d, lw=0.5, color='g', zorder=10, alpha=1., label='', capsize=0) - ax2t.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, linestyle='', color='royalblue', zorder=10, alpha=1., label='', capsize=0) + ax2t.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, color='royalblue', zorder=10, alpha=1., label='', capsize=0) xgrism = np.concatenate([xg0,xg1,xg2]) fgrism = np.concatenate([fg0,fg1,fg2]) @@ -899,8 +900,6 @@ def func_tmp(xint,eobs,fmodel): col_dia = 'blue' if MB.f_dust: ALLFILT = np.append(SFILT,DFILT) - #for ii in range(len(x1_tot)): - # print(x1_tot[ii], model_tot[ii]*c/np.square(x1_tot[ii])/d) lbb, fbb, lfwhm = filconv(ALLFILT, x1_tot, ytmp50, DIR_FILT, fw=True) lbb, fbb16, lfwhm = filconv(ALLFILT, x1_tot, ytmp16, DIR_FILT, fw=True) lbb, fbb84, lfwhm = filconv(ALLFILT, x1_tot, ytmp84, DIR_FILT, fw=True) @@ -1244,7 +1243,8 @@ def func_tmp(xint,eobs,fmodel): ax1.xaxis.labelpad = -3 if f_grsm: - if np.max(xg0)<23000: # E.g. WFC3, NIRISS grisms + if wave_spec_max<23000: + # E.g. WFC3, NIRISS grisms conlim = (x0>10000) & (x0<25000) xgmin, xgmax = np.min(x0[conlim]),np.max(x0[conlim]), #7500, 17000 ax2t.set_xlabel('') @@ -1263,7 +1263,8 @@ def func_tmp(xint,eobs,fmodel): ax2t.set_xticks([8000, 10000, 12000, 14000, 16000]) ax2t.set_xticklabels(['0.8', '1.0', '1.2', '1.4', '1.6']) else: - conlim = (x0>10000) & (x0<54000) # NIRSPEC spectrum; + # NIRSPEC spectrum; + conlim = (x0>10000) & (x0<54000) xgmin, xgmax = np.min(x0[conlim]),np.max(x0[conlim]), #7500, 17000 ax2t.set_xlabel('') ax2t.set_xlim(xgmin, xgmax) @@ -2796,11 +2797,11 @@ def plot_corner_physparam_summary(MB, fig=None, out_ind=0, DIR_OUT='./', mmax=10 xg0 = x[con0] fg0 = fy[con0] eg0 = ey[con0] - con1 = (NR>=data_len[0]) & (NR=data_len[0]) & (NR=data_len[1]) & (NR=data_len[1]+data_len[0]) & (NR Date: Fri, 30 Dec 2022 21:27:15 -0800 Subject: [PATCH 07/20] for notebook to run --- gsf/fitting.py | 166 +++++++++++++++++++++++++++++++++++++- gsf/maketmp_filt.py | 16 ++-- gsf/posterior_flexible.py | 2 +- 3 files changed, 174 insertions(+), 10 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 05edea1..60a7e2f 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -776,7 +776,7 @@ def read_data(self, Cz0:float, Cz1:float, Cz2:float, zgal:float, add_fir:bool=Fa def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, - zlimu=None, snlim=0, priors=None, f_bb_zfit=True, f_line_check=False, + zlimu=None, snlim=0, priors=None, f_line_check=False, f_norm=True, f_lambda=False, zmax=20, include_bb=False, f_exclude_negative=False): ''' Find the best-fit redshift, before going into a big fit, through an interactive inspection. @@ -1959,3 +1959,167 @@ def __init__(self, flatchain, var_names, params_value, res): print('Terminating process.') print('\n\n') return False + + + def quick_fit(self, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, f_move=False, + f_get_templates=False, Zini=None, include_bb=True): + '''Fits input data with a prepared template library, to get a chi-min result. + This function is being used in an example notebook. 
+ + Parameters: + ----------- + Zini : array + Array for initial values for metallicity. + + Returns + ------- + out, chidef, Zbest, xm_tmp, fm_tmp (if f_get_templates is set True). + ''' + from .posterior_flexible import Post + print('#########') + print('Quick fit') + print('#########\n') + + # Call likelihood/prior/posterior function; + class_post = Post(self) + + # Prepare library, data, etc. + self.prepare_class() + + # Initial Z: + if Zini == None: + Zini = self.Zall + + # Temporarily disable zmc; + self.fzmc = 0 + out, chidef, Zbest = get_leastsq(self, Zini, self.fneld, self.age, self.fit_params, class_post.residual,\ + self.dict['fy'], self.dict['ey'], self.dict['wht2'], self.ID) + + if f_get_templates: + Av_tmp = out.params['Av'].value + AA_tmp = np.zeros(len(self.age), dtype='float') + ZZ_tmp = np.zeros(len(self.age), dtype='float') + # fm_tmp, xm_tmp = self.fnc.tmp04(out, f_val=True) + fm_tmp, xm_tmp = self.fnc.get_template(out, f_val=True, f_nrd=False, f_neb=False) + + ######################## + # Check redshift + ######################## + if self.fzvis: + flag_z = self.fit_redshift(xm_tmp, fm_tmp, delzz=0.001, include_bb=include_bb) + + self.fzmc = 1 + return out,chidef,Zbest, xm_tmp, fm_tmp + else: + self.fzmc = 1 + return out,chidef,Zbest + + + def search_redshift(self, dict, xm_tmp, fm_tmp, zliml=0.01, zlimu=6.0, delzz=0.01, lines=False, prior=None, method='powell'): + ''' + This module explores the redshift space to find the best redshift and probability distribution. + + Parameters + ---------- + dict : dictionary + Dictionary that includes input data. + + xm_tmp : numpy.array + Wavelength array, common for fm_tmp below, at z=0. Should be in [len(wavelength)]. + fm_tmp : numpy.array + Fluxes for various templates. Should be in a shape of [ n * len(wavelength)], + where n is the number templates. + zliml : float + Lowest redshift for fitting range. + zlimu : float + Highest redshift for fitting range. + prior : numpy.array + Prior used for the redshift determination. E.g., Eazy z-probability. + method : str + Method for minimization. The option must be taken from lmfit. Powell is more accurate. Nelder is faster. + + Returns + ------- + zspace : numpy.array + Array for redshift grid. + chi2s : numpy.array + Array of chi2 values corresponding to zspace. 
+ ''' + import scipy.interpolate as interpolate + + zspace = np.arange(zliml,zlimu,delzz) + chi2s = np.zeros((len(zspace),2), 'float') + if prior == None: + prior = zspace[:] * 0 + 1.0 + + data_len = self.data['meta']['data_len'] + + NR = dict['NR'] + con0 = (NR=data_len[0]) & (NR=data_len[1]+data_len[0]) & (NR Date: Thu, 5 Jan 2023 11:28:16 -0800 Subject: [PATCH 08/20] Update plot_sed.py bug fixed --- gsf/plot_sed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gsf/plot_sed.py b/gsf/plot_sed.py index 4bd4f5e..42bc20f 100644 --- a/gsf/plot_sed.py +++ b/gsf/plot_sed.py @@ -223,10 +223,10 @@ def gaus(x,a,x0,sigma): fg2 = fy[con2] eg2 = ey[con2] con_spec = (NR0 or len(xg1)>0 or len(xg2)>0: f_grsm = True + wave_spec_max = np.max(x[con_spec]) else: f_grsm = False From a6943187c64fcc25081b3114812cd81e257f5144 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Tue, 10 Jan 2023 09:46:16 -0800 Subject: [PATCH 09/20] Update gsf.py minor --- gsf/gsf.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/gsf/gsf.py b/gsf/gsf.py index 3b727f8..9a355d8 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -16,7 +16,8 @@ start = timeit.default_timer() -def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave=10): +def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave=10, + f_IGM=True): ''' Purpose ------- @@ -44,7 +45,7 @@ def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave # # 0. Make basic templates # - if fplt == 0 or fplt == 1: + if fplt == 0 or fplt == 1 or fplt == 2: # # 0. Make basic templates # @@ -72,9 +73,9 @@ def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave # 1. Start making redshifted templates. # if MB.SFH_FORM == -99: - maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave, f_IGM=f_IGM) else: - maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave, f_IGM=f_IGM) # Read temp from asdf; # This has to happend after fplt==1 and before fplt>=2. From c995136d8c5f4ad39a3957d2e75b23d24c53bbe8 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Wed, 11 Jan 2023 08:52:44 -0800 Subject: [PATCH 10/20] to mask out e=0 data --- gsf/fitting.py | 2 +- gsf/function.py | 1 - gsf/maketmp_filt.py | 4 ++++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 60a7e2f..9efe818 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -726,7 +726,7 @@ def read_data(self, Cz0:float, Cz1:float, Cz2:float, zgal:float, add_fir:bool=Fa ey = np.append(ey02,ey_bb) wht = 1./np.square(ey) - con_wht = (ey<0) + con_wht = (ey<=0) wht[con_wht] = 0 # For now... 
diff --git a/gsf/function.py b/gsf/function.py index 0bc30a6..65e727a 100644 --- a/gsf/function.py +++ b/gsf/function.py @@ -285,7 +285,6 @@ def get_leastsq(MB, ZZtmp, fneld, age, fit_params, residual, fy, ey, wht, ID0, fit_params['Z'+str(aa)].value = ZZ f_fir = False - out_tmp = minimize(residual, fit_params, args=(fy, ey, wht, f_fir), method=fit_name, kws={'f_only_spec':f_only_spec}) diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 1c9ace1..3ed0f6a 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -1105,6 +1105,8 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, file_tmp2 = 'tmp_library2%s.txt'%MB.ID fw = open(file_tmp,'w') fw.write('# BB data (>%d) in this file are not used in fitting.\n'%(ncolbb)) + + # this is for spec; for ii in range(len(lm)): g_offset = 0 #1000 * fgrs[ii] if lm[ii]/(1.+zbest) > lamliml and lm[ii]/(1.+zbest) < lamlimu and not np.isnan(fobs[ii]): @@ -1164,6 +1166,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, fw.write('%d %.5f %.5e %.5e %.1f %s\n'%(ii+ncolbb, ltmpbb[0,ii], fbb[ii], ebb[ii], FWFILT[ii]/2., SFILT[ii])) fw.close() fw_rem.close() + # register; dat = ascii.read(file_tmp, format='no_header') NRbb = dat['col1'] @@ -1183,6 +1186,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, dict_bb_obs_removed = {'NR':NR_ex, 'x':x_ex, 'fy':fy_ex, 'ey':ey_ex, 'ex':ex_ex} MB.data['bb_obs_removed'] = dict_bb_obs_removed except: + print('Some unknown error in maketmp_filt...') pass # Dust; Not sure where this is being used... From 5dd09c56252b273f0113c060de748c21a889b3d6 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Fri, 20 Jan 2023 15:34:00 -0800 Subject: [PATCH 11/20] Update fitting.py amplitude min range to -10. --- gsf/fitting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 9efe818..7cf73f9 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -1245,7 +1245,7 @@ def set_param(self): self.Amin = float(self.inputs['AMIN']) self.Amax = float(self.inputs['AMAX']) except: - self.Amin = -5 + self.Amin = -10 self.Amax = 10 self.Aini = -1 else: From 11c1ec2f2bc27a76a698d45b7bab13854f937df9 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Fri, 20 Jan 2023 15:34:06 -0800 Subject: [PATCH 12/20] Update gsf.py minor --- gsf/gsf.py | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/gsf/gsf.py b/gsf/gsf.py index 9a355d8..4f1d694 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -176,17 +176,9 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No if not flag_suc: sys.exit() - # Read temp from asdf; - # Template must be registered before fplt>=2. - # try: - # aftmp = MB.af - # except: - # MB.af = asdf.open(os.path.join(MB.DIR_TMP, 'spec_all_' + MB.ID + '.asdf')) - if fplt <= 2: MB.zprev = MB.zgal MB.ndim_keep = MB.ndim - # # 1. Start making redshifted templates, at z=MB.zgal. # @@ -195,6 +187,9 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No else: flag_suc = maketemp_tau(MB, tau_lim=MB.tau_lim, nthin=MB.nthin, delwave=MB.delwave) + if not flag_suc: + return False + # # 2. Main fitting part. 
# @@ -230,18 +225,18 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No if fplt <= 3 and flag_suc: - if True: #fplt >= 3: # If main fitting has not been done; - # make redshifted templates and register data; - - # Use the final redshift; - from astropy.io import fits - hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header - MB.zgal = hd_sum['ZMC'] - - if MB.SFH_FORM == -99: - flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) - else: - flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + # Use the final redshift; + from astropy.io import fits + hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header + MB.zgal = hd_sum['ZMC'] + + if MB.SFH_FORM == -99: + flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + else: + flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + + if not flag_suc: + return False if MB.SFH_FORM == -99: from .plot_sfh import plot_sfh From 8bf61a8f2bc3e683779e1f4f730c8dd814e58c84 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Fri, 20 Jan 2023 15:53:45 -0800 Subject: [PATCH 13/20] for error msg --- gsf/function.py | 19 +++++++++++++++++++ gsf/maketmp_filt.py | 3 +++ requirements.txt | 1 + 3 files changed, 23 insertions(+) diff --git a/gsf/function.py b/gsf/function.py index 65e727a..cdb847b 100644 --- a/gsf/function.py +++ b/gsf/function.py @@ -7,6 +7,7 @@ import scipy.interpolate as interpolate from scipy.interpolate import interp1d import logging +from colorama import Fore, Back, Style ################ # Line library @@ -17,6 +18,24 @@ c = 3.e18 # A/s +def print_err(msg, exit=False): + ''' + ''' + print(Fore.RED) + print('$$$ ================= $$$') + print('$$$ gsf error message $$$') + print(Fore.RED) + print(msg) + print(Fore.RED) + print('$$$ ================= $$$') + print(Style.RESET_ALL) + + if exit: + print(Fore.CYAN) + print('Exiting.') + print(Style.RESET_ALL) + sys.exit() + def str2bool(v): ''' ''' diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 3ed0f6a..45e3182 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -330,6 +330,9 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, try: af = MB.af0 except: + if not os.path.exists(os.path.join(DIR_TMP, 'spec_all.asdf')): + msg = 'The z=0 template is missing in %s directory.\nCheck your configuration file (`DIR_TEMP`) or run with flag=0.'%(DIR_TMP) + print_err(msg, exit=True) af = asdf.open(os.path.join(DIR_TMP, 'spec_all.asdf')) MB.af0 = af diff --git a/requirements.txt b/requirements.txt index 58450ef..7a9bec5 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ asdf astropy +colorama corner emcee lmfit From 2598c4d3a357ecd03ca7f0db29173f221da5b19f Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Fri, 20 Jan 2023 17:57:54 -0800 Subject: [PATCH 14/20] error message on input typoes --- gsf/fitting.py | 14 +++++++++++++- gsf/function.py | 17 +++++++++++++---- gsf/maketmp_filt.py | 15 +++++++-------- gsf/plot_sfh.py | 20 +++++++++----------- 4 files changed, 42 insertions(+), 24 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 7cf73f9..5eaf342 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -21,7 +21,7 @@ import corner # import from custom codes -from .function import check_line_man, check_line_cz_man, calc_Dn4, savecpkl, get_leastsq +from .function import check_line_man, check_line_cz_man, calc_Dn4, savecpkl, get_leastsq, print_err from .zfit import 
check_redshift,get_chi2 from .writing import get_param from .function_class import Func @@ -1382,6 +1382,15 @@ def set_param(self): return fit_params + def check_mainbody(self): + ''' + To check any issues with the input params. + ''' + if len(self.age) != len(set(self.age)): + msg = 'Input age has duplications. Check `AGE`.' + print_err(msg, exit=True, details=None) + + def prepare_class(self, add_fir=None): ''' ''' @@ -1517,6 +1526,9 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, # Prepare library, data, etc. self.prepare_class() + # Check Main Body; + self.check_mainbody() + print('########################') print('### Fitting Function ###') print('########################') diff --git a/gsf/function.py b/gsf/function.py index cdb847b..ac27f34 100644 --- a/gsf/function.py +++ b/gsf/function.py @@ -8,6 +8,7 @@ from scipy.interpolate import interp1d import logging from colorama import Fore, Back, Style +from datetime import datetime ################ # Line library @@ -18,16 +19,24 @@ c = 3.e18 # A/s -def print_err(msg, exit=False): +def print_err(msg, exit=False, details=None): ''' ''' + now = datetime.now() print(Fore.RED) - print('$$$ ================= $$$') - print('$$$ gsf error message $$$') + print('$$$ =================== $$$') + print('$$$ gsf error message $$$') + print('%s'%now) + if not details == None: + # @@@ This does not work?? + exc_type, exc_obj, exc_tb = details + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + print(exc_type, fname, exc_tb.tb_lineno) + + print('$$$ =================== $$$') print(Fore.RED) print(msg) print(Fore.RED) - print('$$$ ================= $$$') print(Style.RESET_ALL) if exit: diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 45e3182..b01fcd6 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -149,9 +149,6 @@ def check_library(MB, af, nround=3): print('No of age pixels:', len(MB.age), len(af['ML']['ms_0'])) flag = False else: - # if len(af['ML']['ms_0_0']) != len(MB.age): - # print('No of age pixels:', len(MB.age), len(af['ML']['ms_0_0'])) - # flag = False flag = True # Matallicity: @@ -1105,7 +1102,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, # The following files are just temporary. file_tmp = 'tmp_library_%s.txt'%MB.ID - file_tmp2 = 'tmp_library2%s.txt'%MB.ID + file_tmp2 = 'tmp_library2_%s.txt'%MB.ID fw = open(file_tmp,'w') fw.write('# BB data (>%d) in this file are not used in fitting.\n'%(ncolbb)) @@ -1179,7 +1176,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, exbb = dat['col5'] dict_bb_obs = {'NR':NRbb, 'x':xbb, 'fy':fybb, 'ey':eybb, 'ex':exbb} MB.data['bb_obs'] = dict_bb_obs - try: + if len(SKIPFILT)>0:#try: dat = ascii.read(file_tmp2, format='no_header') NR_ex = dat['col1'] x_ex = dat['col2'] @@ -1188,9 +1185,11 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, ex_ex = dat['col5'] dict_bb_obs_removed = {'NR':NR_ex, 'x':x_ex, 'fy':fy_ex, 'ey':ey_ex, 'ex':ex_ex} MB.data['bb_obs_removed'] = dict_bb_obs_removed - except: - print('Some unknown error in maketmp_filt...') - pass + # else: + # MB.data['bb_obs_removed'] = {} + # else:#except: + # print('Some unknown error in maketmp_filt...') + # pass # Dust; Not sure where this is being used... 
fw = open(file_tmp,'w') diff --git a/gsf/plot_sfh.py b/gsf/plot_sfh.py index 2532d6e..f8357e9 100644 --- a/gsf/plot_sfh.py +++ b/gsf/plot_sfh.py @@ -174,7 +174,8 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f delT[aa] = tau_lim else: delT[aa] = delTu[aa] + delTl[aa] - else: # This is only true when CSP... + else: + # @@@ Note: This is only true when CSP...? for aa in range(len(age)): if aa == 0: delTl[aa] = age[aa] @@ -329,16 +330,13 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f Zrand = np.random.uniform(-eZ[aa],eZ[aa]) f_m_sur = sedpar0['frac_mass_survive_%d'%nZtmp][aa] - if True:# Age limit here? - # quantity in log scale; - AM[aa, mm] = AAtmp[aa] + np.log10(mslist[aa]) + Arand - AL[aa, mm] = AM[aa,mm] - np.log10(mslist[aa]) - SF[aa, mm] = AAtmp[aa] + np.log10(mslist[aa] / delT[aa] / f_m_sur) + Arand # / ml - ZM[aa, mm] = ZZtmp[aa] + Zrand - ZMM[aa, mm]= ZZtmp[aa] + AAtmp[aa] + np.log10(mslist[aa]) + Zrand - ZML[aa, mm]= ZMM[aa,mm] - np.log10(mslist[aa]) - if SF[aa, mm] < -10: - SF[aa, mm] = np.nan + # quantity in log scale; + AM[aa, mm] = AAtmp[aa] + np.log10(mslist[aa]) + Arand + AL[aa, mm] = AM[aa,mm] - np.log10(mslist[aa]) + SF[aa, mm] = AAtmp[aa] + np.log10(mslist[aa] / delT[aa] / f_m_sur) + Arand # / ml + ZM[aa, mm] = ZZtmp[aa] + Zrand + ZMM[aa, mm]= ZZtmp[aa] + AAtmp[aa] + np.log10(mslist[aa]) + Zrand + ZML[aa, mm]= ZMM[aa,mm] - np.log10(mslist[aa]) # SFR from SED. This will be converted in log later; if age[aa]<=tset_SFR_SED: From 0a16b259acce61a79298592e5f1525bb0526c024 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Mon, 30 Jan 2023 13:55:57 -0800 Subject: [PATCH 15/20] individual templates to be removed --- gsf/fitting.py | 1 + gsf/gsf.py | 44 ++-------- gsf/maketmp_filt.py | 8 +- gsf/plot_sfh.py | 190 ++++++++++++++++++++++---------------------- 4 files changed, 110 insertions(+), 133 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 5eaf342..0b47ab9 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -74,6 +74,7 @@ def __init__(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set:floa ''' self.update_input(inputs, idman=idman, zman=zman) self.NRbb_lim = NRbb_lim + self.ztemplate = False def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set:float=25.0, pixelscale:float=0.06, Lsun:float=3.839*1e33, cosmo=None, diff --git a/gsf/gsf.py b/gsf/gsf.py index 4f1d694..efe8fce 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -4,6 +4,7 @@ import os.path import string from astropy.cosmology import WMAP9 as cosmo +from astropy.io import fits import asdf # From gsf @@ -77,33 +78,6 @@ def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave else: maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave, f_IGM=f_IGM) - # Read temp from asdf; - # This has to happend after fplt==1 and before fplt>=2. - # MB.af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf') - - ''' - # - # 2. Load templates - # - MB.lib = MB.fnc.open_spec_fits(MB, fall=0) - MB.lib_all = MB.fnc.open_spec_fits(MB, fall=1) - if MB.f_dust: - MB.lib_dust = MB.fnc.open_spec_dust_fits(MB, fall=0) - MB.lib_dust_all = MB.fnc.open_spec_dust_fits(MB, fall=1) - ''' - - # How to get SED? 
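The per-bin SFR expression in the plot_sfh hunk above packs several factors into one log sum, so a short numeric restatement may help. The values below are invented, the Arand scatter term is omitted, and the physical reading (dividing by the surviving-mass fraction to recover formed mass) is an interpretation rather than something stated in the patch.

    import numpy as np

    AAtmp = 0.3          # log amplitude drawn for this age bin
    mslist = 2.0e9       # surviving stellar mass per unit amplitude (from the ML table)
    f_m_sur = 0.6        # fraction of the formed mass that survives to this age
    delT = 5.0e8         # [yr] width of the age bin

    logSFR = AAtmp + np.log10(mslist / delT / f_m_sur)   # as written in plot_sfh
    SFR = 10**AAtmp * (mslist / f_m_sur) / delT          # the same quantity in linear units
    assert np.isclose(10**logSFR, SFR)
    print(SFR)                                           # ~13.3 Msun/yr for these numbers
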
- if False: - import matplotlib.pyplot as plt - for T in MB.age[:1]: - y0, x0 = MB.fnc.get_template(MB.lib_all, Amp=1.0, T=T, Av=0.0, Z=-1.0, zgal=MB.zgal) - plt.plot(x0/(1.+MB.zgal),y0,linestyle='-',lw=1.0, label='$T=%.2f$\n$z=%.2f$'%(T,MB.zgal)) - plt.xlim(600,20000) - plt.xlabel('Rest frame wavelength') - plt.xscale('log') - plt.legend(loc=0) - plt.show() - return MB @@ -226,17 +200,16 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No if fplt <= 3 and flag_suc: # Use the final redshift; - from astropy.io import fits hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header MB.zgal = hd_sum['ZMC'] - - if MB.SFH_FORM == -99: - flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) - else: - flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) - if not flag_suc: - return False + if not MB.ztemplate: + if MB.SFH_FORM == -99: + flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + else: + flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + if not flag_suc: + return False if MB.SFH_FORM == -99: from .plot_sfh import plot_sfh @@ -268,7 +241,6 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No if fplt == 6: # Use the final redshift; - from astropy.io import fits hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header MB.zgal = hd_sum['ZMC'] diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index b01fcd6..63b3cf6 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -1088,10 +1088,13 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, print('dust updated.') # Save; + file_asdf = os.path.join(DIR_TMP, 'spec_all_' + MB.ID + '.asdf') af = asdf.AsdfFile(tree) - af.write_to(DIR_TMP + 'spec_all_' + MB.ID + '.asdf', all_array_compression='zlib') + af.write_to(file_asdf, all_array_compression='zlib') # Loading the redshifted template; - MB.af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf') + MB.af = asdf.open(file_asdf) + # Remove file? + os.system('rm %s'%file_asdf) ########################################## # For data; @@ -1212,6 +1215,7 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, print('Done making templates at z=%.2f.\n'%zbest) os.system('rm %s %s'%(file_tmp, file_tmp2)) + MB.ztemplate = True return True diff --git a/gsf/plot_sfh.py b/gsf/plot_sfh.py index f8357e9..db54007 100644 --- a/gsf/plot_sfh.py +++ b/gsf/plot_sfh.py @@ -143,7 +143,7 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f SN = 1 Asum = 0 - A50 = np.arange(len(age), dtype='float') + A50 = np.arange(len(age), dtype=float) for aa in range(len(A50)): A50[aa] = 10**hdul[1].data['A'+str(aa)][1] Asum += A50[aa] @@ -157,9 +157,9 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f Tuni = MB.cosmo.age(zbes).value #, use_flat=True, **cosmo) Tuni0 = (Tuni - age[:]) - delT = np.zeros(len(age),dtype='float') - delTl = np.zeros(len(age),dtype='float') - delTu = np.zeros(len(age),dtype='float') + delT = np.zeros(len(age),dtype=float) + delTl = np.zeros(len(age),dtype=float) + delTu = np.zeros(len(age),dtype=float) if len(age) == 1: for aa in range(len(age)): @@ -227,18 +227,18 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ###################### # Mass-to-Light ratio. ###################### - AM = np.zeros((len(age), mmax), dtype='float') # Mass in each bin. 
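# [Illustrative sketch -- not part of the patch] A minimal asdf round trip like
# the one used above for the redshifted template library (spec_all_<ID>.asdf),
# assuming a tree of plain numpy arrays. The hunk above removes the file right
# after asdf.open(); since asdf usually loads arrays lazily from disk, this
# sketch keeps the file around instead.
import asdf
import numpy as np

tree = {'id': 'demo',
        'wavelength': np.linspace(1000., 50000., 1000),
        'fspec_0_0_0': np.ones(1000)}
af = asdf.AsdfFile(tree)
af.write_to('spec_all_demo.asdf', all_array_compression='zlib')

with asdf.open('spec_all_demo.asdf') as af_in:
    print(af_in['wavelength'][:3])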
- AC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative mass in each bin. - AL = np.zeros((len(age), mmax), dtype='float') # Cumulative light in each bin. - ZM = np.zeros((len(age), mmax), dtype='float') # Z. - ZC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative Z. - ZL = np.zeros((len(age), mmax), dtype='float') -99 # Light weighted cumulative Z. - TC = np.zeros((len(age), mmax), dtype='float') # Mass weighted T. - TL = np.zeros((len(age), mmax), dtype='float') # Light weighted T. - ZMM= np.zeros((len(age), mmax), dtype='float') # Mass weighted Z. - ZML= np.zeros((len(age), mmax), dtype='float') # Light weighted Z. - SF = np.zeros((len(age), mmax), dtype='float') # SFR - Av = np.zeros(mmax, dtype='float') # SFR + AM = np.zeros((len(age), mmax), dtype=float) # Mass in each bin. + AC = np.zeros((len(age), mmax), dtype=float) -99 # Cumulative mass in each bin. + AL = np.zeros((len(age), mmax), dtype=float) # Cumulative light in each bin. + ZM = np.zeros((len(age), mmax), dtype=float) # Z. + ZC = np.zeros((len(age), mmax), dtype=float) -99 # Cumulative Z. + ZL = np.zeros((len(age), mmax), dtype=float) -99 # Light weighted cumulative Z. + TC = np.zeros((len(age), mmax), dtype=float) # Mass weighted T. + TL = np.zeros((len(age), mmax), dtype=float) # Light weighted T. + ZMM= np.zeros((len(age), mmax), dtype=float) # Mass weighted Z. + ZML= np.zeros((len(age), mmax), dtype=float) # Light weighted Z. + SF = np.zeros((len(age), mmax), dtype=float) # SFR + Av = np.zeros(mmax, dtype=float) # SFR # ############################## @@ -281,17 +281,17 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f # Get SED based SFR ##################### f_SFRSED_plot = False - SFR_SED = np.zeros(mmax,dtype='float') + SFR_SED = np.zeros(mmax,dtype=float) # ASDF; - af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf') + af = MB.af #asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf') af0 = asdf.open(MB.DIR_TMP + 'spec_all.asdf') sedpar = af['ML'] # For M/L sedpar0 = af0['ML'] # For mass loss frac. 
- AAtmp = np.zeros(len(age), dtype='float') - ZZtmp = np.zeros(len(age), dtype='float') - mslist= np.zeros(len(age), dtype='float') + AAtmp = np.zeros(len(age), dtype=float) + ZZtmp = np.zeros(len(age), dtype=float) + mslist= np.zeros(len(age), dtype=float) for mm in range(mmax): delt_tot = 0 @@ -400,12 +400,12 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ############# # Plot ############# - AMp = np.zeros((len(age),3), dtype='float') - ACp = np.zeros((len(age),3), dtype='float') - ZMp = np.zeros((len(age),3), dtype='float') - ZCp = np.zeros((len(age),3), dtype='float') - ZLp = np.zeros((len(age),3), dtype='float') - SFp = np.zeros((len(age),3), dtype='float') + AMp = np.zeros((len(age),3), dtype=float) + ACp = np.zeros((len(age),3), dtype=float) + ZMp = np.zeros((len(age),3), dtype=float) + ZCp = np.zeros((len(age),3), dtype=float) + ZLp = np.zeros((len(age),3), dtype=float) + SFp = np.zeros((len(age),3), dtype=float) for aa in range(len(age)): AMp[aa,:] = np.nanpercentile(AM[aa,:], [16,50,84]) ACp[aa,:] = np.nanpercentile(AC[aa,:], [16,50,84]) @@ -421,7 +421,7 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f linestyle='', color='orange', lw=1., marker='*',ms=8,zorder=-2) ################### - msize = np.zeros(len(age), dtype='float') + msize = np.zeros(len(age), dtype=float) for aa in range(len(age)): if A50[aa]/Asum>flim: # if >1% msize[aa] = 200 * A50[aa]/Asum @@ -544,7 +544,7 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f zred = [zbes, 20] zredl = ['$z_\mathrm{obs.}$', 20] - Tzz = np.zeros(len(zred), dtype='float') + Tzz = np.zeros(len(zred), dtype=float) for zz in range(len(zred)): Tzz[zz] = (Tuni - MB.cosmo.age(zred[zz]).value) if Tzz[zz] < Txmin: @@ -814,8 +814,8 @@ def sfr_tau(t0, tau0, Z=0.0, sfh=0, tt=np.arange(0,13,0.1), Mtot=1., in Msun ''' - yy = np.zeros(len(tt), dtype='float') - yyms = np.zeros(len(tt), dtype='float') + yy = np.zeros(len(tt), dtype=float) + yyms = np.zeros(len(tt), dtype=float) con = (tt<=t0) if sfh == 1: yy[con] = np.exp((tt[con]-t0)/tau0) @@ -966,7 +966,7 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax SN = 1 Asum = 0 - A50 = np.arange(len(age), dtype='float') + A50 = np.arange(len(age), dtype=float) for aa in range(len(A50)): A50[aa] = 10**hdul[1].data['A'+str(aa)][1] Asum += A50[aa] @@ -980,9 +980,9 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax Tuni = MB.cosmo.age(zbes).value #, use_flat=True, **cosmo) Tuni0 = (Tuni - age[:]) - delT = np.zeros(len(age),dtype='float') - delTl = np.zeros(len(age),dtype='float') - delTu = np.zeros(len(age),dtype='float') + delT = np.zeros(len(age),dtype=float) + delTl = np.zeros(len(age),dtype=float) + delTu = np.zeros(len(age),dtype=float) if len(age) == 1: #if tau0[0] < 0: # SSP; @@ -1048,18 +1048,18 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax ###################### # Mass-to-Light ratio. ###################### - AM = np.zeros((len(age), mmax), dtype='float') # Mass in each bin. - AC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative mass in each bin. - AL = np.zeros((len(age), mmax), dtype='float') # Cumulative light in each bin. - ZM = np.zeros((len(age), mmax), dtype='float') # Z. - ZC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative Z. - ZL = np.zeros((len(age), mmax), dtype='float') -99 # Light weighted cumulative Z. 
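# [Illustrative sketch -- not part of the patch] The 16/50/84 percentile
# summary that the loops above apply to the Monte-Carlo arrays (AMp, ACp, ...),
# shown here on random draws; the vectorized call gives the same numbers as
# the per-bin loop.
import numpy as np

rng = np.random.default_rng(1)
AM = rng.normal(loc=10.0, scale=0.2, size=(4, 1000))   # (n_age, mmax) draws
AMp = np.nanpercentile(AM, [16, 50, 84], axis=1).T     # shape (n_age, 3)
print(AMp[0])   # 16th / 50th / 84th percentile of the first age bin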
- TC = np.zeros((len(age), mmax), dtype='float') # Mass weighted T. - TL = np.zeros((len(age), mmax), dtype='float') # Light weighted T. - ZMM = np.zeros((len(age), mmax), dtype='float') # Mass weighted Z. - ZML= np.zeros((len(age), mmax), dtype='float') # Light weighted Z. - SF = np.zeros((len(age), mmax), dtype='float') # SFR - Av = np.zeros(mmax, dtype='float') # SFR + AM = np.zeros((len(age), mmax), dtype=float) # Mass in each bin. + AC = np.zeros((len(age), mmax), dtype=float) -99 # Cumulative mass in each bin. + AL = np.zeros((len(age), mmax), dtype=float) # Cumulative light in each bin. + ZM = np.zeros((len(age), mmax), dtype=float) # Z. + ZC = np.zeros((len(age), mmax), dtype=float) -99 # Cumulative Z. + ZL = np.zeros((len(age), mmax), dtype=float) -99 # Light weighted cumulative Z. + TC = np.zeros((len(age), mmax), dtype=float) # Mass weighted T. + TL = np.zeros((len(age), mmax), dtype=float) # Light weighted T. + ZMM = np.zeros((len(age), mmax), dtype=float) # Mass weighted Z. + ZML= np.zeros((len(age), mmax), dtype=float) # Light weighted Z. + SF = np.zeros((len(age), mmax), dtype=float) # SFR + Av = np.zeros(mmax, dtype=float) # SFR # ############################## @@ -1074,7 +1074,7 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax # Get SED based SFR ##################### f_SFRSED_plot = False - SFR_SED = np.zeros(mmax,dtype='float') + SFR_SED = np.zeros(mmax,dtype=float) # ASDF; af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf') @@ -1084,15 +1084,15 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax ttmin = 0.001 tt = np.arange(ttmin,Tuni+0.5,ttmin/10) - xSF = np.zeros((len(tt), mmax), dtype='float') # SFR - ySF = np.zeros((len(tt), mmax), dtype='float') # SFR - yMS = np.zeros((len(tt), mmax), dtype='float') # MFR - ySF_each = np.zeros((MB.npeak, len(tt), mmax), dtype='float') # SFR - yMS_each = np.zeros((MB.npeak, len(tt), mmax), dtype='float') # MFR + xSF = np.zeros((len(tt), mmax), dtype=float) # SFR + ySF = np.zeros((len(tt), mmax), dtype=float) # SFR + yMS = np.zeros((len(tt), mmax), dtype=float) # MFR + ySF_each = np.zeros((MB.npeak, len(tt), mmax), dtype=float) # SFR + yMS_each = np.zeros((MB.npeak, len(tt), mmax), dtype=float) # MFR - ZZmc = np.zeros((MB.npeak, mmax), dtype='float') - TTmc = np.zeros((MB.npeak, mmax), dtype='float') - TAmc = np.zeros((MB.npeak, mmax), dtype='float') + ZZmc = np.zeros((MB.npeak, mmax), dtype=float) + TTmc = np.zeros((MB.npeak, mmax), dtype=float) + TAmc = np.zeros((MB.npeak, mmax), dtype=float) if Txmin > np.min(tt): Txmin = np.min(tt) * 0.8 @@ -1150,11 +1150,11 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax ############# # Plot ############# - xSFp = np.zeros((len(tt),3), dtype='float') - ySFp = np.zeros((len(tt),3), dtype='float') - yMSp = np.zeros((len(tt),3), dtype='float') - ySFp_each = np.zeros((MB.npeak, len(tt), 3), dtype='float') - yMSp_each = np.zeros((MB.npeak, len(tt), 3), dtype='float') + xSFp = np.zeros((len(tt),3), dtype=float) + ySFp = np.zeros((len(tt),3), dtype=float) + yMSp = np.zeros((len(tt),3), dtype=float) + ySFp_each = np.zeros((MB.npeak, len(tt), 3), dtype=float) + yMSp_each = np.zeros((MB.npeak, len(tt), 3), dtype=float) for ii in range(len(tt)): xSFp[ii,:] = np.percentile(xSF[ii,:], [16,50,84]) ySFp[ii,:] = np.percentile(ySF[ii,:], [16,50,84]) @@ -1170,19 +1170,19 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax ax1.plot(xSFp[:,1], np.log10(ySFp[:,1]), 
linestyle='-', color='k', alpha=1., zorder=-1, lw=0.5) ax2.plot(xSFp[:,1], np.log10(yMSp[:,1]), linestyle='-', color='k', alpha=1., zorder=-1, lw=0.5) - ACp = np.zeros((len(tt),3),'float') - SFp = np.zeros((len(tt),3),'float') + ACp = np.zeros((len(tt),3),float) + SFp = np.zeros((len(tt),3),float) ACp[:] = np.log10(yMSp[:,:]) SFp[:] = np.log10(ySFp[:,:]) SFR_SED_med = np.percentile(SFR_SED[:],[16,50,84]) ################### - msize = np.zeros(len(age), dtype='float') + msize = np.zeros(len(age), dtype=float) # Metal - ZCp = np.zeros((MB.npeak,3),'float') - TCp = np.zeros((MB.npeak,3),'float') - TTp = np.zeros((MB.npeak,3),'float') + ZCp = np.zeros((MB.npeak,3),float) + TCp = np.zeros((MB.npeak,3),float) + TTp = np.zeros((MB.npeak,3),float) for aa in range(len(age)): if A50[aa]/Asum>flim: # if >1% msize[aa] = 200 * A50[aa]/Asum @@ -1267,7 +1267,7 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax zred = [zbes, 20] zredl = ['$z_\mathrm{obs.}$', 20] - Tzz = np.zeros(len(zred), dtype='float') + Tzz = np.zeros(len(zred), dtype=float) for zz in range(len(zred)): Tzz[zz] = (Tuni - MB.cosmo.age(zred[zz]).value) if Tzz[zz] < Txmin: @@ -1544,7 +1544,7 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 Asum = 0 - A50 = np.arange(len(age), dtype='float') + A50 = np.arange(len(age), dtype=float) for aa in range(len(A50)): A50[aa] = hdul[1].data['A'+str(aa)][1] Asum += A50[aa] @@ -1558,9 +1558,9 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 Tuni = MB.cosmo.age(zbes).value Tuni0 = (Tuni - age[:]) - delT = np.zeros(len(age),dtype='float') - delTl = np.zeros(len(age),dtype='float') - delTu = np.zeros(len(age),dtype='float') + delT = np.zeros(len(age),dtype=float) + delTl = np.zeros(len(age),dtype=float) + delTu = np.zeros(len(age),dtype=float) for aa in range(len(age)): if aa == 0: delTl[aa] = age[aa] @@ -1606,18 +1606,18 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 ###################### # Mass-to-Light ratio. ###################### - AM = np.zeros((len(age), mmax), dtype='float') # Mass in each bin. - AC = np.zeros((len(age), mmax), dtype='float') # Cumulative mass in each bin. - AL = np.zeros((len(age), mmax), dtype='float') # Cumulative light in each bin. - ZM = np.zeros((len(age), mmax), dtype='float') # Z. - ZC = np.zeros((len(age), mmax), dtype='float') # Cumulative Z. - ZL = np.zeros((len(age), mmax), dtype='float') # Light weighted cumulative Z. - TC = np.zeros((len(age), mmax), dtype='float') # Mass weighted T. - TL = np.zeros((len(age), mmax), dtype='float') # Light weighted T. - ZMM= np.zeros((len(age), mmax), dtype='float') # Mass weighted Z. - ZML= np.zeros((len(age), mmax), dtype='float') # Light weighted Z. - SF = np.zeros((len(age), mmax), dtype='float') # SFR - Av = np.zeros(mmax, dtype='float') # SFR + AM = np.zeros((len(age), mmax), dtype=float) # Mass in each bin. + AC = np.zeros((len(age), mmax), dtype=float) # Cumulative mass in each bin. + AL = np.zeros((len(age), mmax), dtype=float) # Cumulative light in each bin. + ZM = np.zeros((len(age), mmax), dtype=float) # Z. + ZC = np.zeros((len(age), mmax), dtype=float) # Cumulative Z. + ZL = np.zeros((len(age), mmax), dtype=float) # Light weighted cumulative Z. + TC = np.zeros((len(age), mmax), dtype=float) # Mass weighted T. + TL = np.zeros((len(age), mmax), dtype=float) # Light weighted T. + ZMM= np.zeros((len(age), mmax), dtype=float) # Mass weighted Z. 
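# [Illustrative sketch -- not part of the patch] Schematic of the mass- vs
# light-weighted metallicities accumulated in ZMM/ZML above. gsf carries these
# per-bin quantities in log space; with illustrative linear per-bin masses M,
# luminosities L and metallicities Z the weighting reads:
import numpy as np

M = np.array([1e9, 5e9, 2e10])   # Msun per age bin
L = np.array([5e9, 2e9, 1e9])    # Lsun per age bin (younger bins brighter)
Z = np.array([0.5, 1.0, 1.5])    # Zsun

Z_mw = np.sum(Z * M) / np.sum(M)   # mass-weighted
Z_lw = np.sum(Z * L) / np.sum(L)   # light-weighted
print('Z_MW = %.2f Zsun,  Z_LW = %.2f Zsun' % (Z_mw, Z_lw))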
+ ZML= np.zeros((len(age), mmax), dtype=float) # Light weighted Z. + SF = np.zeros((len(age), mmax), dtype=float) # SFR + Av = np.zeros(mmax, dtype=float) # SFR # ############################## # Add simulated scatter in quad @@ -1655,9 +1655,9 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 mm = 0 for mm in range(mmax): mtmp = np.random.randint(len(samples))# + Nburn - AAtmp = np.zeros(len(age), dtype='float') - ZZtmp = np.zeros(len(age), dtype='float') - mslist= np.zeros(len(age), dtype='float') + AAtmp = np.zeros(len(age), dtype=float) + ZZtmp = np.zeros(len(age), dtype=float) + mslist= np.zeros(len(age), dtype=float) Av_tmp = samples['Av'][mtmp] @@ -1718,11 +1718,11 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 ############# # Plot ############# - AMp = np.zeros((len(age),3), dtype='float') - ACp = np.zeros((len(age),3), dtype='float') - ZMp = np.zeros((len(age),3), dtype='float') - ZCp = np.zeros((len(age),3), dtype='float') - SFp = np.zeros((len(age),3), dtype='float') + AMp = np.zeros((len(age),3), dtype=float) + ACp = np.zeros((len(age),3), dtype=float) + ZMp = np.zeros((len(age),3), dtype=float) + ZCp = np.zeros((len(age),3), dtype=float) + SFp = np.zeros((len(age),3), dtype=float) for aa in range(len(age)): AMp[aa,:] = np.percentile(AM[aa,:], [16,50,84]) ACp[aa,:] = np.percentile(AC[aa,:], [16,50,84]) @@ -1731,7 +1731,7 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 SFp[aa,:] = np.percentile(SF[aa,:], [16,50,84]) ################### - msize = np.zeros(len(age), dtype='float') + msize = np.zeros(len(age), dtype=float) for aa in range(len(age)): if A50[aa]/Asum>flim: # if >1% msize[aa] = 150 * A50[aa]/Asum @@ -1766,7 +1766,7 @@ def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1 for ss in range(len(t_get)): wave0, flux0 = sp.get_spectrum(tage=t_get[ss], peraa=True) if ss == 0: - spec_mul_nu_conv = np.zeros((len(t_get),len(wave0)),dtype='float') + spec_mul_nu_conv = np.zeros((len(t_get),len(wave0)),dtype=float) print('Template %d is processed.'%(ss)) wavetmp = wave0*(1.+zbes) @@ -1885,7 +1885,7 @@ def plot_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, SN = 1 Asum = 0 - A50 = np.arange(len(age), dtype='float') + A50 = np.arange(len(age), dtype=float) for aa in range(len(A50)): A50[aa] = hdul[1].data['A'+str(aa)][1] Asum += A50[aa] From a4cb478db9284473a787ebf79902302d35621240 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Mon, 30 Jan 2023 14:34:06 -0800 Subject: [PATCH 16/20] minor update in plot --- gsf/gsf.py | 30 ++++++++++++----------------- gsf/maketmp_filt.py | 47 +++++++++++++++++---------------------------- gsf/plot_sfh.py | 31 ++++++++++++++++++------------ 3 files changed, 49 insertions(+), 59 deletions(-) diff --git a/gsf/gsf.py b/gsf/gsf.py index efe8fce..e6f3449 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -82,9 +82,11 @@ def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=None, f_label=True, f_symbol=True, - f_SFMS=False, f_fill=True, save_sed=True, figpdf=False, mmax=300, skip_sfh=False, f_fancyplot=False, - skip_zhist=False, tau_lim=0.001, tset_SFR_SED=0.1, f_shuffle=False, amp_shuffle=1e-2, Zini=None, - nthin=1, delwave=1, f_plot_resid=False, scale=1e-19, f_plot_filter=True, f_prior_sfh=False, norder_sfh_prior=3): + f_SFMS=False, f_fill=True, save_sed=True, figpdf=False, mmax=300, 
f_prior_sfh=False, norder_sfh_prior=3, + f_shuffle=False, amp_shuffle=1e-2, Zini=None, tau_lim=0.001, + skip_sfh=False, f_fancyplot=False, skip_zhist=False, f_sfh_yaxis_force=True, tset_SFR_SED=0.1, + nthin=1, delwave=1, f_plot_resid=False, scale=1e-19, f_plot_filter=True + ): ''' Purpose ------- @@ -221,33 +223,25 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No if not skip_sfh: plot_sfh(MB, fil_path=MB.DIR_FILT, mmax=mmax, dust_model=MB.dust_model, DIR_TMP=MB.DIR_TMP, f_silence=True, - f_SFMS=f_SFMS, f_symbol=f_symbol, skip_zhist=skip_zhist, tau_lim=tau_lim, tset_SFR_SED=tset_SFR_SED) + f_SFMS=f_SFMS, f_symbol=f_symbol, skip_zhist=skip_zhist, + tau_lim=tau_lim, tset_SFR_SED=tset_SFR_SED, f_sfh_yaxis_force=f_sfh_yaxis_force) plot_sed(MB, fil_path=MB.DIR_FILT, figpdf=figpdf, save_sed=save_sed, mmax=mmax, dust_model=MB.dust_model, DIR_TMP=MB.DIR_TMP, f_label=f_label, f_fill=f_fill, f_fancyplot=f_fancyplot, f_plot_resid=f_plot_resid, scale=scale, f_plot_filter=f_plot_filter) - ''' - if fplt == 4: - from .plot_sfh import get_evolv - get_evolv(MB, MB.ID, MB.PA, MB.Zall, MB.age, f_comp=MB.ftaucomp, fil_path=MB.DIR_FILT, inputs=MB.inputs, dust_model=MB.dust_model, DIR_TMP=MB.DIR_TMP) - - - if fplt == 5: - from .plot_sfh import plot_evolv - plot_evolv(MB, MB.ID, MB.PA, MB.Zall, MB.age, f_comp=MB.ftaucomp, fil_path=MB.DIR_FILT, inputs=MB.inputs, dust_model=MB.dust_model, DIR_TMP=MB.DIR_TMP, nmc=10) - ''' if fplt == 6: # Use the final redshift; hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header MB.zgal = hd_sum['ZMC'] - if MB.SFH_FORM == -99: - flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) - else: - flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + if not MB.ztemplate: + if MB.SFH_FORM == -99: + flag_suc = maketemp(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) + else: + flag_suc = maketemp_tau(MB, tau_lim=tau_lim, nthin=nthin, delwave=delwave) if MB.SFH_FORM == -99: from .plot_sed import plot_corner_physparam_frame,plot_corner_physparam_summary diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 63b3cf6..3e06e49 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -211,12 +211,12 @@ def get_LSF(inputs, DIR_EXTR, ID, lm, wave_repr=4000, c=3e18, alp = 0 f_morp = True except Exception: - print('Error in reading morphology params.') - print('No morphology convolution.') + msg = 'Error in reading morphology params.\nNo morphology convolution.' + print_err(msg, exit=False) pass else: - print('MORP Keywords does not match.') - print('No morphology convolution.') + msg = 'MORP Keywords does not match.\nNo morphology convolution.' + print_err(msg, exit=False) ############################ # Template convolution; @@ -259,8 +259,8 @@ def get_LSF(inputs, DIR_EXTR, ID, lm, wave_repr=4000, c=3e18, print('Template convolution with Gaussian.') print('params is sigma;',sigma) else: - print('Something is wrong with the convolution file. Exiting.') - return False + msg = 'Something is wrong with the convolution file. Exiting.' + print_err(msg, exit=True) else: # For slit spectroscopy. To be updated... 
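# [Illustrative sketch -- not part of the patch] The morphology-based LSF that
# get_LSF() configures above, assuming a Moffat profile built from the gamma /
# alp values read out of MORP_FILE. Kernel grid, profile parameters and the
# test spectrum are illustrative, not gsf defaults.
import numpy as np
from astropy.modeling.models import Moffat1D
from astropy.convolution import convolve

xlsf = np.arange(-50., 51., 1.0)                     # odd-sized kernel grid (pixels)
lsf = Moffat1D(amplitude=1.0, x_0=0.0, gamma=3.0, alpha=2.5)(xlsf)
lsf /= lsf.sum()                                     # normalize the kernel

spec = np.ones(500)
spec[250] = 10.0                                     # fake emission line
spec_conv = convolve(spec, lsf, boundary='extend')   # as in maketemp()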
print('Templates convolution (intrinsic velocity).') @@ -317,11 +317,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, tau0 = MB.tau0 delz = 1.0 - # wid_z = MB.zmcmax - MB.zmcmin - # nz = int(wid_z/delz) - # if nz%2 == 0: - # nz += 1 - # MB.zbests = np.linspace(MB.zgal - wid_z/2., MB.zgal + wid_z/2., nz) MB.zbests = np.arange(MB.zgal, MB.zgal + 0.01, delz) try: @@ -339,8 +334,8 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, # Consistency check: flag = check_library(MB, af) if not flag: - print('\n!!!\nThere is inconsistency in z0 library and input file. Exiting.\n!!!\n') - sys.exit() + msg = 'There is inconsistency in z0 library and input file. Exiting.' + print_err(msg, exit=True) # ASDF Big tree; # Create header; @@ -379,11 +374,8 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, print(','.join(MB.filts)) print('') except: - print('########################') - print('Filter is not detected!!') - print('Make sure your \nfilter directory is correct.') - print('########################') - sys.exit() + msg = 'Filter is not detected!!\nMake sure your filter directory is correct.' + print_err(msg, exit=True) try: SKIPFILT = inputs['SKIPFILT'] @@ -466,7 +458,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, eobs[ii] = eobs0[ii1] MB.f_spec = True - # data_meta['data_len'] = np.append(data_meta['data_len'], len(lm0tmp)) data_meta['data_len'][ff] = len(lm0tmp) data_meta['data_origin'] = np.append(data_meta['data_origin'], '%s'%spec_file) data_meta['data_index'] = np.append(data_meta['data_index'], '%d'%ff) @@ -499,24 +490,22 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, ii0 = np.where(id0[:]==MB.ID) try: if len(ii0[0]) == 0: - print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(MB.ID)) - return False + msg = 'Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(MB.ID) + print_err(msg, exit=True) id = fd0[key_id][ii0] except: - print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(MB.ID)) - print(fd0) - return False + msg = 'Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(MB.ID) + print_err(msg, exit=True) fbb = np.zeros(len(SFILT), dtype=float) ebb = np.zeros(len(SFILT), dtype=float) - for ii in range(len(SFILT)): try: fbb[ii] = fd0['F%s'%(SFILT[ii])][ii0] ebb[ii] = fd0['E%s'%(SFILT[ii])][ii0] except: - print('Could not find flux inputs for filter %s in the input BB catalog! Exiting.'%(SFILT[ii])) - return False + msg = 'Could not find flux inputs for filter %s in the input BB catalog! Exiting.'%(SFILT[ii]) + print_err(msg, exit=True) elif CAT_BB_IND: # if individual photometric catalog; made in get_sdss.py unit = 'nu' @@ -567,8 +556,8 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, try: id = fd0[key_id][ii0] except: - print('Could not find the column for [ID: %s] in the input BB catalog! Exiting.'%(MB.ID)) - return False + msg = 'Could not find the column for [ID: %s] in the input BB catalog! 
Exiting.'%(MB.ID) + print_err(msg, exit=True) except: return False diff --git a/gsf/plot_sfh.py b/gsf/plot_sfh.py index db54007..182003c 100644 --- a/gsf/plot_sfh.py +++ b/gsf/plot_sfh.py @@ -20,7 +20,7 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, fil_path='./FILT/', dust_model=0, f_SFMS=False, f_symbol=True, verbose=False, f_silence=True, DIR_TMP=None, - f_log_sfh=True, dpi=250, TMIN=0.0001, tau_lim=0.01, skip_zhist=False, tset_SFR_SED=0.1, f_axis_force=True): + f_log_sfh=True, dpi=250, TMIN=0.0001, tau_lim=0.01, skip_zhist=False, tset_SFR_SED=0.1, f_sfh_yaxis_force=True): ''' Purpose ------- @@ -118,10 +118,10 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f chinu= hdul[1].data['chi'] try: - RA = hdul[0].header['RA'] - DEC = hdul[0].header['DEC'] + RA = hdul[0].header['RA'] + DEC = hdul[0].header['DEC'] except: - RA = 0 + RA = 0 DEC = 0 try: SN = hdul[0].header['SN'] @@ -154,7 +154,7 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminositydistance in cm Cons = (4.*np.pi*DL**2/(1.+zbes)) - Tuni = MB.cosmo.age(zbes).value #, use_flat=True, **cosmo) + Tuni = MB.cosmo.age(zbes).value Tuni0 = (Tuni - age[:]) delT = np.zeros(len(age),dtype=float) @@ -522,7 +522,13 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ############# # For redshift if zbes<4: - if zbes<2: + if zbes<0.5: + zred = [zbes, 1, 5] + zredl = ['$z_\mathrm{obs.}$', 1, 5] + elif zbes<1: + zred = [zbes, 2, 6] + zredl = ['$z_\mathrm{obs.}$', 2, 6] + elif zbes<2: zred = [zbes, 2, 3, 6] zredl = ['$z_\mathrm{obs.}$', 2, 3, 6] elif zbes<2.5: @@ -557,14 +563,12 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f lsfrl = np.min(SFp[:,2])+0.1 if f_log_sfh: - if f_axis_force: + if f_sfh_yaxis_force: ax1.set_ylim(lsfrl, lsfru) ax1.set_ylabel('$\log \dot{M}_*/M_\odot$yr$^{-1}$', fontsize=12) - #ax1.plot(Tzz, Tzz*0+lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None') else: ax1.set_ylim(0, 10**lsfru) ax1.set_ylabel('$\dot{M}_*/M_\odot$yr$^{-1}$', fontsize=12) - #ax1.plot(Tzz, Tzz*0+10**lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None') if Txmax < np.max(age): Txmax = np.max(age) @@ -573,10 +577,13 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ax2.set_ylabel('$\log M_*/M_\odot$', fontsize=12) ax2.set_xlim(Txmin, Txmax) - if f_axis_force: + if f_sfh_yaxis_force: ax2.set_ylim(y2min, y2max) + else: + y2min, y2max = ax2.get_ylim()[0], ax2.get_ylim()[1] + ax2.set_xscale('log') - ax2.text(np.min(age*1.05), y2min + 0.07*(y2max-y2min), 'ID: %s\n$z_\mathrm{obs.}:%.2f$\n$\log M_\mathrm{*}/M_\odot:%.2f$\n$\log Z_\mathrm{*}/Z_\odot:%.2f$\n$\log T_\mathrm{*}$/Gyr$:%.2f$\n$A_V$/mag$:%.2f$'\ + ax2.text(Txmin + 0.0002 * np.log10(Txmax/Txmin), y2min + 0.07*(y2max-y2min), 'ID: %s\n$z_\mathrm{obs.}:%.2f$\n$\log M_\mathrm{*}/M_\odot:%.2f$\n$\log Z_\mathrm{*}/Z_\odot:%.2f$\n$\log T_\mathrm{*}$/Gyr$:%.2f$\n$A_V$/mag$:%.2f$'\ %(ID, zbes, ACp[0,1], ZCp[0,1], np.nanmedian(TC[0,:]), Avtmp[1]), fontsize=9, bbox=dict(facecolor='w', alpha=0.7), zorder=10) # @@ -743,7 +750,7 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ax4.set_ylim(y3min, y3max) ax4.set_xscale('log') - if f_axis_force: + if f_sfh_yaxis_force: Zticks = np.arange(MB.Zall.min(), MB.Zall.max()+delZZl, delZl) ax4.set_yticks(Zticks) Zlabels = [] From 
79a967c37283f6dc7c8812472e2ace54ab66931a Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Mon, 30 Jan 2023 16:34:22 -0800 Subject: [PATCH 17/20] minor cleanup --- gsf/fitting.py | 25 +++++++++++++++---------- gsf/gsf.py | 10 ++++++---- gsf/maketmp_filt.py | 33 --------------------------------- 3 files changed, 21 insertions(+), 47 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 0b47ab9..ed1f330 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -63,22 +63,21 @@ class Mainbody(): Otherwise, it would be either minimum value (=0.01; if one age bin), or the width to the next age bin. ''' - def __init__(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set:float=25.0, pixelscale:float=0.06, Lsun:float=3.839*1e33, - cosmo=None, idman=None, zman=None, NRbb_lim=10000): + cosmo=None, idman:str=None, zman=None, zman_min=None, zman_max=None, NRbb_lim=10000): ''' Parameters ---------- NRbb_lim : int BB data is associated with ids greater than this number. ''' - self.update_input(inputs, idman=idman, zman=zman) + self.update_input(inputs, idman=idman, zman=zman, zman_min=zman_min, zman_max=zman_max) self.NRbb_lim = NRbb_lim self.ztemplate = False def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set:float=25.0, pixelscale:float=0.06, Lsun:float=3.839*1e33, cosmo=None, - idman=None, zman=None, sigz:float=5.0): + idman:str=None, zman=None, zman_min=None, zman_max=None, sigz:float=5.0): ''' The purpose of this module is to register/update the parameter attributes in `Mainbody` by visiting the configuration file. @@ -134,13 +133,17 @@ def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set: if zman != None: self.zgal = zman - self.zmcmin = None - self.zmcmax = None + self.zmcmin = zman_min + self.zmcmax = zman_max else: try: self.zgal = float(inputs['ZMC']) except: - iix = np.where(self.fd_cat['id'] == self.ID) + ids_cat = self.fd_cat['id'].astype('str') + iix = np.where(ids_cat.value == self.ID) + if len(iix[0]) == 0: + msg = 'id `%s` cannot be found in the catalog, `%s`'%(self.ID,self.CAT_BB) + print_err(msg, exit=True) self.zgal = float(self.fd_cat['redshift'][iix]) try: @@ -425,6 +428,9 @@ def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set: except: print('BPASS_DIR is not found. Using default.') self.BPASS_DIR = '/astro/udfcen3/Takahiro/BPASS/' + if not os.path.exists(self.BPASS_DIR): + msg = 'BPASS directory, %s, not found.'%self.BPASS_DIR + print_err(msg, exit=True) self.BPASS_ver = 'v2.2.1' self.Zsun = 0.020 @@ -1856,10 +1862,9 @@ def __init__(self, flatchain, var_names, params_value, res): # Inserting result from res0 into res structure; res = get_res(flatchain, var_names, params_value, res) res.bic = -99 - else: - print('Sampling is not specified. Failed. Exiting.') - return False + msg = 'Sampling is not specified. Failed. Exiting.' 
+ print_err(msg, exit=True) stop_mc = timeit.default_timer() tcalc_mc = stop_mc - start_mc diff --git a/gsf/gsf.py b/gsf/gsf.py index e6f3449..e9bcccb 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -81,8 +81,10 @@ def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave return MB -def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=None, f_label=True, f_symbol=True, - f_SFMS=False, f_fill=True, save_sed=True, figpdf=False, mmax=300, f_prior_sfh=False, norder_sfh_prior=3, +def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, + zman=None, zman_min=None, zman_max=None, f_label=True, f_symbol=True, + f_SFMS=False, f_fill=True, save_sed=True, figpdf=False, mmax=300, + f_prior_sfh=False, norder_sfh_prior=3, f_shuffle=False, amp_shuffle=1e-2, Zini=None, tau_lim=0.001, skip_sfh=False, f_fancyplot=False, skip_zhist=False, f_sfh_yaxis_force=True, tset_SFR_SED=0.1, nthin=1, delwave=1, f_plot_resid=False, scale=1e-19, f_plot_filter=True @@ -104,8 +106,8 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman=None, zman=No ###################### from .function import read_input inputs = read_input(parfile) - - MB = Mainbody(inputs, c=3e18, Mpc_cm=3.08568025e+24, m0set=25.0, pixelscale=0.06, cosmo=cosmo, idman=idman, zman=zman) + MB = Mainbody(inputs, c=3e18, Mpc_cm=3.08568025e+24, m0set=25.0, pixelscale=0.06, + cosmo=cosmo, idman=idman, zman=zman, zman_min=zman_min, zman_max=zman_max) # Register some params; MB.tau_lim = tau_lim diff --git a/gsf/maketmp_filt.py b/gsf/maketmp_filt.py index 3e06e49..ffbb3ac 100644 --- a/gsf/maketmp_filt.py +++ b/gsf/maketmp_filt.py @@ -636,33 +636,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, else: spec_mul[ss,:] = spechdu['fspec_'+str(zz)+'_'+str(ss)+'_'+str(pp)][::nthin] # Lsun/A - """ - ################### - # Check xi_ion - ################### - h = 6.626e-34 # J s - # 1 J = 1e7 erg - h *= 1e7 # erg s - nu = c / wave # A/s / A = 1/s - # con_lyc = (wave[:-1]<912.0) & (wave[:-1]>228.0) - # nph = spec_mul[ss,:] / (h * nu) # Lsun/A / (erg s * 1/s) - # nph *= MB.Lsun # 1/A - # delwave_array = np.diff(wave) - # nph_Lyc = np.nansum(nph[:-1][con_lyc]*delwave_array[con_lyc]) - con_lyc = (wave[:]<912.0) & (wave[:]>228.0) - nph = spec_mul[ss,:] / (h * nu) # Lsun/s/A / (erg s * 1/s) = Lsun/s / A / erg - nph *= MB.Lsun # 1/s/A - nph_Lyc = np.nansum(nph[:][con_lyc]) # 1/s - - # UV Flux density; - fnu = flamtonu(wave, spec_mul[ss,:], m0set=MB.m0set) * MB.Lsun # erg/A - #/ (4. * np.pi * DL10**2) # fnu, in erg/s/cm2/Hz. - Fuv = get_Fuv(wave, fnu, lmin=1250, lmax=1650) # erg/A - # MUV = -2.5 * np.log10(Fuv) + MB.m0set - Luv = Fuv * (1650-1250) # erg - xi_ion = nph_Lyc / Luv # 1/s / (erg) - """ - ################### # IGM attenuation. ################### @@ -699,7 +672,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, try: spec_mul_nu_conv[ss,:] = convolve(spec_mul_nu[ss], LSF, boundary='extend') except: - #print('Error. No convolution is happening...') spec_mul_nu_conv[ss,:] = spec_mul_nu[ss] if zz==0 and ss==0: print('Kernel is too small. 
No convolution.') @@ -1177,11 +1149,6 @@ def maketemp(MB, ebblim=1e10, lamliml=0., lamlimu=50000., ncolbb=10000, ex_ex = dat['col5'] dict_bb_obs_removed = {'NR':NR_ex, 'x':x_ex, 'fy':fy_ex, 'ey':ey_ex, 'ex':ex_ex} MB.data['bb_obs_removed'] = dict_bb_obs_removed - # else: - # MB.data['bb_obs_removed'] = {} - # else:#except: - # print('Some unknown error in maketmp_filt...') - # pass # Dust; Not sure where this is being used... fw = open(file_tmp,'w') From a133a664e8a77e16528c91a9452bdcadd81d5e7a Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Tue, 31 Jan 2023 14:35:15 -0800 Subject: [PATCH 18/20] optimizing for memory --- gsf/fitting.py | 27 ++++++++++++++-------- gsf/plot_sed.py | 59 +++++++++++++++++++++++++++++++++++-------------- gsf/plot_sfh.py | 22 ++++++++++++++---- 3 files changed, 78 insertions(+), 30 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index ed1f330..84eda07 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -9,7 +9,6 @@ import matplotlib.pyplot as plt from lmfit import Model, Parameters, minimize, fit_report#, Minimizer from numpy import log10 -import pickle as cPickle import os.path import random import string @@ -1761,6 +1760,7 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, else: axes.set_xlabel("step number") plt.savefig('%s/chain_%s.png'%(self.DIR_OUT,self.ID)) + plt.close() # Similar for nested; # Dummy just to get structures; @@ -1878,10 +1878,20 @@ def __init__(self, flatchain, var_names, params_value, res): burnin = int(self.nmc/2) #burnin = 0 # Since already burnt in. savepath = self.DIR_OUT - cpklname = 'chain_' + self.ID + '_corner.cpkl' - savecpkl({'chain':flatchain, - 'burnin':burnin, 'nwalkers':self.nwalk,'niter':self.nmc,'ndim':self.ndim}, - savepath+cpklname) # Already burn in + + use_pickl = False + if use_pickl: + cpklname = 'chain_' + self.ID + '_corner.cpkl' + savecpkl({'chain':flatchain, + 'burnin':burnin, 'nwalkers':self.nwalk,'niter':self.nmc,'ndim':self.ndim}, + savepath+cpklname) # Already burn in + else: + import asdf + cpklname = 'chain_' + self.ID + '_corner.asdf' + tree = {'chain':flatchain.to_dict(), 'burnin':burnin, 'nwalkers':self.nwalk,'niter':self.nmc,'ndim':self.ndim} + af = asdf.AsdfFile(tree) + af.write_to(savepath+cpklname, all_array_compression='zlib') + stop_mc = timeit.default_timer() tcalc_mc = stop_mc - start_mc if verbose: @@ -1899,25 +1909,24 @@ def __init__(self, flatchain, var_names, params_value, res): val_truth = [] for par in var_names: val_truth.append(params_value[par]) + fig1 = corner.corner(flatchain, labels=var_names, \ label_kwargs={'fontsize':16}, quantiles=quantiles, show_titles=False, \ title_kwargs={"fontsize": 14}, \ truths=val_truth, \ plot_datapoints=False, plot_contours=True, no_fill_contours=True, \ plot_density=False, levels=levels, truth_color='gray', color='#4682b4') + fig1.savefig(self.DIR_OUT + 'SPEC_' + self.ID + '_corner.png') self.cornerplot_fig = fig1 # Analyze MCMC results. # Write to file. 
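# [Illustrative sketch -- not part of the patch] The pickle-free chain storage
# introduced above: the flattened chain (a pandas DataFrame) goes into an asdf
# tree together with the sampler metadata and is read back by the plotting
# modules. This sketch uses DataFrame.to_dict('list') so the tree holds plain
# lists, whereas the patch calls to_dict() with the default orientation.
import asdf
import pandas as pd

flatchain = pd.DataFrame({'A0': [0.1, 0.2, 0.3], 'Av': [0.5, 0.6, 0.4]})
tree = {'chain': flatchain.to_dict('list'),
        'burnin': 1, 'nwalkers': 10, 'niter': 3, 'ndim': 2}
asdf.AsdfFile(tree).write_to('chain_demo_corner.asdf', all_array_compression='zlib')

with asdf.open('chain_demo_corner.asdf') as data:
    samples = pd.DataFrame(data['chain'])   # back to a DataFrame
    print(samples['Av'].median(), data['burnin'])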
- stop = timeit.default_timer() + stop = timeit.default_timer() tcalc = stop - start # Then writing; - start_mc = timeit.default_timer() get_param(self, res, fitc, tcalc=tcalc, burnin=burnin) - stop_mc = timeit.default_timer() - tcalc_mc = stop_mc - start_mc return 2 # Cannot set to 1, to distinguish from retuen True diff --git a/gsf/plot_sed.py b/gsf/plot_sed.py index 42bc20f..fab2a70 100644 --- a/gsf/plot_sed.py +++ b/gsf/plot_sed.py @@ -628,20 +628,29 @@ def gaus(x,a,x0,sigma): # # From MCMC chain # - file = MB.DIR_OUT + 'chain_' + ID + '_corner.cpkl' - niter = 0 - data = loadcpkl(file) + samplepath = MB.DIR_OUT + use_pickl = False + if use_pickl: + pfile = 'chain_' + ID + '_corner.cpkl' + data = loadcpkl(os.path.join(samplepath+'/'+pfile)) + else: + pfile = 'chain_' + ID + '_corner.asdf' + data = asdf.open(os.path.join(samplepath+'/'+pfile)) + try: ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present. burnin = data['burnin'] nmc = data['niter'] nwalk = data['nwalkers'] - Nburn = burnin #*20 - res = data['chain'][:] + Nburn = burnin + if use_pickl: + samples = data['chain'][:] + else: + samples = data['chain'] except: - if verbose: print(' = > NO keys of ndim and burnin found in cpkl, use input keyword values') - - samples = res + msg = ' = > NO keys of ndim and burnin found in cpkl, use input keyword values' + print_err(msg, exit=False) + return -1 # Saved template; ytmp = np.zeros((mmax,len(ysum)), dtype='float') @@ -1358,6 +1367,9 @@ def func_tmp(xint,eobs,fmodel): else: fig.savefig(MB.DIR_OUT + 'SPEC_' + ID + '_spec.png', dpi=dpi) + fig.clear() + plt.close() + def plot_sed_tau(MB, flim=0.01, fil_path='./', scale=1e-19, f_chind=True, figpdf=False, save_sed=True, mmax=300, dust_model=0, DIR_TMP='./templates/', f_label=False, f_bbbox=False, verbose=False, f_silence=True, @@ -2625,6 +2637,9 @@ def func_tmp(xint,eobs,fmodel): else: fig.savefig(MB.DIR_OUT + 'SPEC_' + ID + '_spec.png', dpi=dpi) + fig.clear() + plt.close() + def plot_filter(MB, ax, ymax, scl=0.3, cmap='gist_rainbow', alp=0.4, ind_remove=[]): ''' @@ -2842,19 +2857,29 @@ def plot_corner_physparam_summary(MB, fig=None, out_ind=0, DIR_OUT='./', mmax=10 #################### # MCMC corner plot. #################### - file = MB.DIR_OUT + 'chain_' + ID + '_corner.cpkl' - niter = 0 - data = loadcpkl(file) + use_pickl = False + samplepath = MB.DIR_OUT + if use_pickl: + pfile = 'chain_' + ID + '_corner.cpkl' + data = loadcpkl(os.path.join(samplepath+'/'+pfile)) + else: + pfile = 'chain_' + ID + '_corner.asdf' + data = asdf.open(os.path.join(samplepath+'/'+pfile)) try: - ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present. + ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present. 
burnin = data['burnin'] - nmc = data['niter'] - nwalk = data['nwalkers'] - Nburn = burnin - samples = data['chain'][:] + nmc = data['niter'] + nwalk = data['nwalkers'] + Nburn = burnin + if use_pickl: + samples = data['chain'][:] + else: + samples = data['chain'] except: - if verbose: print(' = > NO keys of ndim and burnin found in cpkl, use input keyword values') + msg = ' = > NO keys of ndim and burnin found in cpkl, use input keyword values' + print_err(msg, exit=False) + return -1 af = MB.af sedpar = af['ML'] diff --git a/gsf/plot_sfh.py b/gsf/plot_sfh.py index 182003c..c5d5735 100644 --- a/gsf/plot_sfh.py +++ b/gsf/plot_sfh.py @@ -209,19 +209,29 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f # Load Pickle ############################## samplepath = MB.DIR_OUT - pfile = 'chain_' + ID + '_corner.cpkl' niter = 0 - data = loadcpkl(os.path.join(samplepath+'/'+pfile)) + use_pickl = False + if use_pickl: + pfile = 'chain_' + ID + '_corner.cpkl' + data = loadcpkl(os.path.join(samplepath+'/'+pfile)) + else: + pfile = 'chain_' + ID + '_corner.asdf' + data = asdf.open(os.path.join(samplepath+'/'+pfile)) + try: ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present. burnin = data['burnin'] nmc = data['niter'] nwalk = data['nwalkers'] Nburn = burnin - samples = data['chain'][:] + if use_pickl: + samples = data['chain'][:] + else: + samples = data['chain'] except: - print(' = > NO keys of ndim and burnin found in cpkl, use input keyword values') + msg = ' = > NO keys of ndim and burnin found in cpkl, use input keyword values' + print_err(msg, exit=False) return -1 ###################### @@ -795,6 +805,8 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f # Save fig.savefig(MB.DIR_OUT + 'SFH_' + ID + '_pcl.png', dpi=dpi) + fig.clear() + plt.close() def sfr_tau(t0, tau0, Z=0.0, sfh=0, tt=np.arange(0,13,0.1), Mtot=1., @@ -1462,6 +1474,8 @@ def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax # Save fig.savefig(MB.DIR_OUT + 'SFH_' + ID + '_pcl.png', dpi=dpi) + fig.clear() + plt.close() def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1.0, 3.0], f_comp=0, fil_path='./FILT/', \ From a6ef2b67b69d78fb3345e727b31b73807c5a4516 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Tue, 31 Jan 2023 15:07:41 -0800 Subject: [PATCH 19/20] memory optimization --- gsf/fitting.py | 48 +++++++++++++++--------------------------------- gsf/gsf.py | 11 ++++++----- 2 files changed, 21 insertions(+), 38 deletions(-) diff --git a/gsf/fitting.py b/gsf/fitting.py index 84eda07..84f36cb 100644 --- a/gsf/fitting.py +++ b/gsf/fitting.py @@ -18,6 +18,10 @@ import scipy.interpolate as interpolate from astropy.io import fits,ascii import corner +import emcee +import zeus +import pandas as pd +import asdf # import from custom codes from .function import check_line_man, check_line_cz_man, calc_Dn4, savecpkl, get_leastsq, print_err @@ -25,6 +29,7 @@ from .writing import get_param from .function_class import Func from .minimizer import Minimizer +from .posterior_flexible import Post ############################ py_v = (sys.version_info[0]) @@ -41,7 +46,6 @@ LW = [2800, 3347, 3727, 3799, 3836, 3869, 4102, 4341, 4861, 4960, 5008, 5175, 6563, 6717, 6731] fLW = np.zeros(len(LW), dtype='int') -# NRbb_lim = 10000 # BB data is associated with ids greater than this number. 
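# [Illustrative sketch -- not part of the patch] The fig.clear() / plt.close()
# calls added after savefig() in the plotting hunks above follow the usual
# matplotlib recipe for keeping memory flat when many figures are written in a
# batch loop; a minimal standalone version:
import matplotlib
matplotlib.use('Agg')            # headless backend for batch runs
import matplotlib.pyplot as plt

for i in range(3):
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, i])
    fig.savefig('demo_%d.png' % i, dpi=100)
    fig.clear()                  # drop the artists
    plt.close(fig)               # release the figure and its manager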
class Mainbody(): ''' @@ -298,10 +302,6 @@ def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set: self.band_rf['%s_lam'%(self.filts_rf[ii])] = fd[:,1] self.band_rf['%s_res'%(self.filts_rf[ii])] = fd[:,2] / np.max(fd[:,2]) - # Tau comparison? - # -> Deprecated; - # self.ftaucomp = 0 - # Check if func model for SFH; try: self.SFH_FORM = int(inputs['SFH_FORM']) @@ -473,9 +473,9 @@ def update_input(self, inputs, c:float=3e18, Mpc_cm:float=3.08568025e+24, m0set: try: self.Avmin = float(inputs['AVMIN']) self.Avmax = float(inputs['AVMAX']) - if Avmin == Avmax: + if self.Avmin == self.Avmax: self.nAV = 0 - self.AVFIX = Avmin + self.AVFIX = self.Avmin self.has_AVFIX = True else: self.nAV = 1 @@ -850,9 +850,9 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, x_cz = self.dict['x'][con_cz] # Observed range NR_cz = self.dict['NR'][con_cz] - # kind='cubic' causes an error if len(xm_tmp)<=3; fint = interpolate.interp1d(xm_tmp, fm_tmp, kind='nearest', fill_value="extrapolate") fm_s = fint(x_cz) + del fint # # If Eazy result exists; @@ -997,9 +997,7 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, # Visual inspection; if self.fzvis==1: - # # Ask interactively; - # data_model_new = np.zeros((len(x_cz),4),'float') data_model_new[:,0] = x_cz data_model_new[:,1] = fm_s @@ -1073,6 +1071,7 @@ def fit_redshift(self, xm_tmp, fm_tmp, delzz=0.01, ezmin=0.01, zliml=0.01, print('Error is %.3f per cent.'%(eC2sigma*100)) print('##############################################################\n') plt.show() + plt.close() flag_z = raw_input('Do you want to continue with the input redshift, Cz0, Cz1, Cz2, and chi2/nu, %.5f %.5f %.5f %.5f %.5f? ([y]/n/m) '%\ (self.zgal, self.Cz0, self.Cz1, self.Cz2, self.fitc_cz_prev)) @@ -1113,7 +1112,7 @@ def get_zdist(self, f_interact=False, f_ascii=True): fig = plt.figure(figsize=(6.5,2.5)) fig.subplots_adjust(top=0.96, bottom=0.16, left=0.09, right=0.99, hspace=0.15, wspace=0.25) ax1 = fig.add_subplot(111) - n, nbins, patches = ax1.hist(self.res_cz.flatchain['z'], bins=200, density=True, color='gray', label='') + n, nbins, _ = ax1.hist(self.res_cz.flatchain['z'], bins=200, density=True, color='gray', label='') yy = np.arange(0,np.max(n),1) xx = yy * 0 + self.z_cz[1] @@ -1152,11 +1151,9 @@ def get_zdist(self, f_interact=False, f_ascii=True): if f_interact: fig.savefig(file_out, dpi=300) - # return fig, ax1 else: plt.savefig(file_out, dpi=300) plt.close() - # return True except: print('z-distribution figure is not generated.') pass @@ -1517,15 +1514,6 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, f_plot_chain : book Plot MC sample chain. 
''' - import emcee - import zeus - try: - import multiprocess - except: - import multiprocessing as multiprocess - - from .posterior_flexible import Post - # Call likelihood/prior/posterior function; class_post = Post(self) @@ -1572,10 +1560,7 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, print('#####################################') print('\n\n') - Av_tmp = out.params['Av'].value - AA_tmp = np.zeros(len(self.age), dtype='float') - ZZ_tmp = np.zeros(len(self.age), dtype='float') - nrd_tmp, fm_tmp, xm_tmp = self.fnc.get_template(out, f_val=True, f_nrd=True, f_neb=False) + _, fm_tmp, xm_tmp = self.fnc.get_template(out, f_val=True, f_nrd=True, f_neb=False) else: csq = out.chisqr rcsq = out.redchi @@ -1740,7 +1725,7 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, # Plot for chain. if f_plot_chain: - fig, axes = plt.subplots(self.ndim, figsize=(10, 7), sharex=True) + _, axes = plt.subplots(self.ndim, figsize=(10, 7), sharex=True) samples = sampler.get_chain() labels = [] for key in out.params.valuesdict(): @@ -1761,6 +1746,9 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, axes.set_xlabel("step number") plt.savefig('%s/chain_%s.png'%(self.DIR_OUT,self.ID)) plt.close() + # For memory optimization; + del samples, axes + # Similar for nested; # Dummy just to get structures; @@ -1785,7 +1773,6 @@ def main(self, cornerplot:bool=True, specplot=1, sigz=1.0, ezmin=0.01, ferr=0, params_value[key] = np.median(flat_samples[nburn:,ii]) ii += 1 - import pandas as pd flatchain = pd.DataFrame(data=flat_samples[nburn:,:], columns=var_names) class get_res: @@ -1847,7 +1834,6 @@ def __init__(self, flatchain, var_names, params_value, res): params_value[key] = np.median(res0.samples[nburn:,ii]) ii += 1 - import pandas as pd flatchain = pd.DataFrame(data=res0.samples[nburn:,:], columns=var_names) class get_res: @@ -1886,7 +1872,6 @@ def __init__(self, flatchain, var_names, params_value, res): 'burnin':burnin, 'nwalkers':self.nwalk,'niter':self.nmc,'ndim':self.ndim}, savepath+cpklname) # Already burn in else: - import asdf cpklname = 'chain_' + self.ID + '_corner.asdf' tree = {'chain':flatchain.to_dict(), 'burnin':burnin, 'nwalkers':self.nwalk,'niter':self.nmc,'ndim':self.ndim} af = asdf.AsdfFile(tree) @@ -1930,7 +1915,6 @@ def __init__(self, flatchain, var_names, params_value, res): return 2 # Cannot set to 1, to distinguish from retuen True - elif flag_z == 'm': zrecom = raw_input('What is your manual input for redshift? [%.3f] '%(self.zgal)) if zrecom != '': @@ -2072,8 +2056,6 @@ def search_redshift(self, dict, xm_tmp, fm_tmp, zliml=0.01, zlimu=6.0, delzz=0.0 chi2s : numpy.array Array of chi2 values corresponding to zspace. 
''' - import scipy.interpolate as interpolate - zspace = np.arange(zliml,zlimu,delzz) chi2s = np.zeros((len(zspace),2), 'float') if prior == None: diff --git a/gsf/gsf.py b/gsf/gsf.py index e9bcccb..ab67b10 100644 --- a/gsf/gsf.py +++ b/gsf/gsf.py @@ -12,6 +12,7 @@ from .maketmp_filt import maketemp,maketemp_tau from .function_class import Func,Func_tau from .basic_func import Basic,Basic_tau +from .function import read_input import timeit start = timeit.default_timer() @@ -81,7 +82,7 @@ def run_gsf_template(inputs, fplt=0, tau_lim=0.001, idman=None, nthin=1, delwave return MB -def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, +def run_gsf_all(parfile, fplt, cornerplot=True, f_plot_chain=True, f_Alog=True, idman:str=None, zman=None, zman_min=None, zman_max=None, f_label=True, f_symbol=True, f_SFMS=False, f_fill=True, save_sed=True, figpdf=False, mmax=300, f_prior_sfh=False, norder_sfh_prior=3, @@ -104,7 +105,6 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, ###################### # Read from Input file ###################### - from .function import read_input inputs = read_input(parfile) MB = Mainbody(inputs, c=3e18, Mpc_cm=3.08568025e+24, m0set=25.0, pixelscale=0.06, cosmo=cosmo, idman=idman, zman=zman, zman_min=zman_min, zman_max=zman_max) @@ -171,7 +171,7 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, # # 2. Main fitting part. # - flag_suc = MB.main(cornerplot=cornerplot, f_shuffle=f_shuffle, amp_shuffle=amp_shuffle, Zini=Zini, + flag_suc = MB.main(cornerplot=cornerplot, f_plot_chain=f_plot_chain, f_shuffle=f_shuffle, amp_shuffle=amp_shuffle, Zini=Zini, f_prior_sfh=f_prior_sfh, norder_sfh_prior=norder_sfh_prior) while (flag_suc and flag_suc!=2): @@ -191,7 +191,7 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, print('Going into another round with updated templates and redshift.') print('\n\n') - flag_suc = MB.main(cornerplot=cornerplot, f_shuffle=f_shuffle, amp_shuffle=amp_shuffle, Zini=Zini, + flag_suc = MB.main(cornerplot=cornerplot, f_plot_chain=f_plot_chain, f_shuffle=f_shuffle, amp_shuffle=amp_shuffle, Zini=Zini, f_prior_sfh=f_prior_sfh, norder_sfh_prior=norder_sfh_prior) # Total calculation time @@ -233,7 +233,6 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, dust_model=MB.dust_model, DIR_TMP=MB.DIR_TMP, f_label=f_label, f_fill=f_fill, f_fancyplot=f_fancyplot, f_plot_resid=f_plot_resid, scale=scale, f_plot_filter=f_plot_filter) - if fplt == 6: # Use the final redshift; hd_sum = fits.open(os.path.join(MB.DIR_OUT, 'summary_%s.fits'%MB.ID))[0].header @@ -252,6 +251,8 @@ def run_gsf_all(parfile, fplt, cornerplot=True, f_Alog=True, idman:str=None, #from .plot_sed_logA import plot_corner_physparam_summary_tau as plot_corner_physparam_summary print('One for Tau model is TBD...') + return MB + if __name__ == "__main__": ''' From 27d5d4dc9e8ffe55c15dae810265c73c6b7f5468 Mon Sep 17 00:00:00 2001 From: Takahiro Morishita Date: Tue, 31 Jan 2023 15:26:41 -0800 Subject: [PATCH 20/20] mem optimization --- gsf/plot_sed.py | 27 ++++++--------------------- gsf/plot_sfh.py | 14 ++++---------- 2 files changed, 10 insertions(+), 31 deletions(-) diff --git a/gsf/plot_sed.py b/gsf/plot_sed.py index fab2a70..3ac5a0e 100644 --- a/gsf/plot_sed.py +++ b/gsf/plot_sed.py @@ -440,13 +440,11 @@ def gaus(x,a,x0,sigma): # This is for UVJ color time evolution. 
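# [Illustrative sketch -- not part of the patch] The brute-force grid search in
# search_redshift() above: evaluate chi2 of a fixed rest-frame template against
# the observed points on a grid of trial redshifts and keep the minimum.
# Template, data and grid below are toy values, not the gsf implementation.
import numpy as np
from scipy import interpolate

lam_rest = np.linspace(900., 9000., 2000)
f_rest = np.exp(-0.5 * ((lam_rest - 4000.) / 200.) ** 2)   # toy template

z_true = 1.0
lam_obs = np.linspace(5000., 12000., 50)
f_obs = np.interp(lam_obs, lam_rest * (1. + z_true), f_rest)
e_obs = np.full_like(f_obs, 0.05)

zspace = np.arange(0.01, 3.0, 0.01)
chi2s = np.zeros(len(zspace))
for i, z in enumerate(zspace):
    fint = interpolate.interp1d(lam_rest * (1. + z), f_rest,
                                kind='nearest', fill_value='extrapolate')
    chi2s[i] = np.sum(((f_obs - fint(lam_obs)) / e_obs) ** 2)
print('best z = %.2f' % zspace[np.argmin(chi2s)])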
# Asum = np.sum(A50[:]) - alp = .5 - for jj in range(len(age)): ii = int(len(nage) - jj - 1) # from old to young templates. if jj == 0: y0, x0 = fnc.get_template_single(A50[ii], AAv[0], ii, Z50[ii], zbes, lib_all) - y0p, x0p = fnc.get_template_single(A50[ii], AAv[0], ii, Z50[ii], zbes, lib) + y0p, _ = fnc.get_template_single(A50[ii], AAv[0], ii, Z50[ii], zbes, lib) ysum = y0 ysump = y0p @@ -465,13 +463,13 @@ def gaus(x,a,x0,sigma): if MB.fneb: # Only at one age pixel; y0_r, x0_tmp = fnc.get_template_single(Aneb50, AAv[0], ii, Z50[ii], zbes, lib_neb_all, logU=logU50) - y0p, x0p = fnc.get_template_single(Aneb50, AAv[0], ii, Z50[ii], zbes, lib_neb, logU=logU50) + y0p, _ = fnc.get_template_single(Aneb50, AAv[0], ii, Z50[ii], zbes, lib_neb, logU=logU50) ysum += y0_r ysump[:nopt] += y0p else: y0_r, x0_tmp = fnc.get_template_single(A50[ii], AAv[0], ii, Z50[ii], zbes, lib_all) - y0p, x0p = fnc.get_template_single(A50[ii], AAv[0], ii, Z50[ii], zbes, lib) + y0p, _ = fnc.get_template_single(A50[ii], AAv[0], ii, Z50[ii], zbes, lib) ysum += y0_r ysump[:nopt] += y0p @@ -494,7 +492,7 @@ def gaus(x,a,x0,sigma): lmrest_wid = x0_wid/(1.+zbes) band0 = ['u','v','j'] - lmconv,fconv = filconv(band0, lmrest_wid, ysum_wid, fil_path) # f0 in fnu + _,fconv = filconv(band0, lmrest_wid, ysum_wid, fil_path) # f0 in fnu fu_t = fconv[0] fv_t = fconv[1] fj_t = fconv[2] @@ -510,9 +508,6 @@ def gaus(x,a,x0,sigma): conbb_ymax = (xbb>0) & (fybb>0) & (eybb>0) & (fybb/eybb>SNlim) ymax = np.max(fybb[conbb_ymax]*c/np.square(xbb[conbb_ymax])/d) * 1.6 - xboxl = 17000 - xboxu = 28000 - ax1.set_xlabel('Observed wavelength [$\mathrm{\mu m}$]', fontsize=11) ax1.set_ylabel('$f_\lambda$ [$10^{%d}\mathrm{erg}/\mathrm{s}/\mathrm{cm}^{2}/\mathrm{\AA}$]'%(np.log10(scale)),fontsize=11,labelpad=2) @@ -690,7 +685,6 @@ def gaus(x,a,x0,sigma): AA_tmp = 0 pass try: - Ztest = samples['Z'+str(len(age)-1)][nr] ZZ_tmp = samples['Z'+str(ss)][nr] except: try: @@ -711,7 +705,7 @@ def gaus(x,a,x0,sigma): mod0_tmp, xm_tmp = fnc.get_template_single(Aneb_tmp, Av_tmp, ss, ZZ_tmp, zmc, lib_neb_all, logU=logU_tmp) fm_tmp += mod0_tmp # Make no emission line template; - mod0_tmp_nl, xm_tmp_nl = fnc.get_template_single(0, Av_tmp, ss, ZZ_tmp, zmc, lib_neb_all, logU=logU_tmp) + mod0_tmp_nl, _ = fnc.get_template_single(0, Av_tmp, ss, ZZ_tmp, zmc, lib_neb_all, logU=logU_tmp) fm_tmp_nl += mod0_tmp_nl else: mod0_tmp, xx_tmp = fnc.get_template_single(AA_tmp, Av_tmp, ss, ZZ_tmp, zmc, lib_all) @@ -774,7 +768,7 @@ def gaus(x,a,x0,sigma): Lir[kk] = 0 # Get UVJ Color; - lmconv,fconv = filconv_fast(MB.filts_rf, MB.band_rf, x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d))) + _,fconv = filconv_fast(MB.filts_rf, MB.band_rf, x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d))) UVJ[kk,0] = -2.5*np.log10(fconv[0]/fconv[2]) UVJ[kk,1] = -2.5*np.log10(fconv[1]/fconv[2]) UVJ[kk,2] = -2.5*np.log10(fconv[2]/fconv[3]) @@ -849,20 +843,13 @@ def gaus(x,a,x0,sigma): elif f_fill: print('f_fancyplot is False. 
f_fill is set to False.') - ######################### # Calculate non-det chi2 # based on Sawick12 - ######################### - def func_tmp(xint,eobs,fmodel): - int_tmp = np.exp(-0.5 * ((xint-fmodel)/eobs)**2) - return int_tmp - if f_chind: conw = (wht3>0) & (ey>0) & (fy/ey>SNlim) else: conw = (wht3>0) & (ey>0) #& (fy/ey>SNlim) - #chi2 = sum((np.square(fy-ysump) * np.sqrt(wht3))[conw]) try: logf = hdul[1].data['logf'][1] ey_revised = np.sqrt(ey**2+ ysump**2 * np.exp(logf)**2) @@ -903,9 +890,7 @@ def func_tmp(xint,eobs,fmodel): fin_chi2 = -99 print('Final chi2/nu : %.2f'%(fin_chi2)) - # # plot BB model from best template (blue squares) - # col_dia = 'blue' if MB.f_dust: ALLFILT = np.append(SFILT,DFILT) diff --git a/gsf/plot_sfh.py b/gsf/plot_sfh.py index c5d5735..6974bc5 100644 --- a/gsf/plot_sfh.py +++ b/gsf/plot_sfh.py @@ -101,21 +101,16 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ax1t = ax1.twiny() ax2t = ax2.twiny() - ################## - # Fitting Results - ################## - SNlim = 3 # avobe which SN line is shown. - ########################### # Open result file ########################### file = MB.DIR_OUT + 'summary_' + ID + '.fits' - hdul = fits.open(file) # open a FITS file + hdul = fits.open(file) try: zbes = hdul[0].header['zmc'] except: zbes = hdul[0].header['z'] - chinu= hdul[1].data['chi'] + chinu = hdul[1].data['chi'] try: RA = hdul[0].header['RA'] @@ -152,7 +147,6 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f # For cosmology #################### DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminositydistance in cm - Cons = (4.*np.pi*DL**2/(1.+zbes)) Tuni = MB.cosmo.age(zbes).value Tuni0 = (Tuni - age[:]) @@ -473,7 +467,8 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f if False: f_rejuv,t_quench,t_rejuv = check_rejuv(age,SFp[:,:],ACp[:,:],SFMS_50) else: - print('Failed to call rejuvenation module.') + if verbose: + print('Failed to call rejuvenation module.') f_rejuv,t_quench,t_rejuv = 0,0,0 # Plot MS? @@ -526,7 +521,6 @@ def plot_sfh(MB, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, f ax4.errorbar(age[aa], ZCp[aa,1], xerr=[[delTl[aa]/1e9],[delTu[aa]/1e9]], yerr=[[ZCp[aa,1]-ZCp[aa,0]],[ZCp[aa,2]-ZCp[aa,1]]], linestyle='-', color=col[aa], lw=1, zorder=1) ax4.scatter(age[aa], ZCp[aa,1], marker='.', c=[col[aa]], edgecolor='k', s=msize[aa], zorder=2) - ############# # Axis #############
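# [Illustrative sketch -- not part of the patch] The func_tmp integrand removed
# in the plot_sed hunk above belongs to the Sawicki (2012) treatment of
# non-detections: each band below the S/N cut contributes the integral of its
# Gaussian likelihood up to the limiting flux instead of a plain chi2 term.
# A hedged version of that term (not the exact gsf implementation):
import numpy as np
from scipy.special import erf

def nondet_chi2(f_model, f_lim, sigma):
    """-2 ln L contribution of non-detected bands (Sawicki 2012 style)."""
    arg = (f_lim - f_model) / (np.sqrt(2.) * sigma)
    integ = np.sqrt(np.pi / 2.) * sigma * (1. + erf(arg))
    return -2. * np.sum(np.log(integ))

# A model flux well above the 1-sigma limiting flux is heavily penalized:
print(nondet_chi2(np.array([1.0]), f_lim=0.3, sigma=0.1))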