From b720fdc85c53838a296cfdf9e00c9b01b145cf4f Mon Sep 17 00:00:00 2001
From: Emmanuel Branlard
Date: Mon, 21 Oct 2024 19:49:44 -0400
Subject: [PATCH] IO/Tools: updates from weio and welib

---
 pydatview/fast/postpro.py              |  85 +++++++++---
 pydatview/fast/subdyn.py               |  16 +--
 pydatview/io/fast_input_file.py        | 179 ++++++++++++++++++++-----
 pydatview/io/fast_input_file_graph.py  |   5 +-
 pydatview/io/fast_output_file.py       |  92 ++++++++-----
 pydatview/io/hawc2_dat_file.py         |   2 +-
 pydatview/io/mini_yaml.py              |  38 +++++-
 pydatview/io/rosco_performance_file.py |  26 ++--
 pydatview/tools/colors.py              | 119 +++++++++++-----
 pydatview/tools/curve_fitting.py       |   2 +-
 pydatview/tools/damping.py             |   2 +-
 pydatview/tools/fatigue.py             |   2 +
 12 files changed, 425 insertions(+), 143 deletions(-)

diff --git a/pydatview/fast/postpro.py b/pydatview/fast/postpro.py
index e99577e..14b772d 100644
--- a/pydatview/fast/postpro.py
+++ b/pydatview/fast/postpro.py
@@ -3,6 +3,10 @@
 import pandas as pd
 import numpy as np
 import re
+try:
+    from scipy.integrate import cumulative_trapezoid
+except ImportError: # older SciPy only provides the legacy name
+    from scipy.integrate import cumtrapz as cumulative_trapezoid
 import pydatview.io as weio
 from pydatview.common import PyDatViewException as WELIBException
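# Standalone sketch (not part of the patch) of the version-robust import used
# above: SciPy renamed `cumtrapz` to `cumulative_trapezoid` in SciPy 1.6 and
# recent releases drop the old name, so the fallback keeps both eras working.
import numpy as np
try:
    from scipy.integrate import cumulative_trapezoid
except ImportError:
    from scipy.integrate import cumtrapz as cumulative_trapezoid

x = np.linspace(0.0, 1.0, 101)
print(cumulative_trapezoid(2.0*x, x)[-1])  # integral of 2x over [0,1] -> ~1.0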
@@ -175,7 +179,6 @@ def AD_BldGag(AD,AD_bld,chordOut=False):
     return r_gag
 
 def BD_BldStations(BD, BDBld):
-
     """ Returns BeamDyn Blade Quadrature Points positions:
        - Defines where BeamDyn outputs are provided.
        - Used by BeamDyn for the Input Mesh u%DistrLoad
@@ -1621,7 +1624,7 @@ def bin_mean_DF(df, xbins, colBin ):
         raise Exception('The column `{}` does not appear to be in the dataframe'.format(colBin))
     xmid      = (xbins[:-1]+xbins[1:])/2
     df['Bin'] = pd.cut(df[colBin], bins=xbins, labels=xmid ) # Adding a column that has bin attribute
-    df2       = df.groupby('Bin', observed=False).mean()     # Average by bin
+    df2       = df.groupby('Bin', observed=False).mean()   # Average by bin
     # also counting
     df['Counts'] = 1
     dfCount=df[['Counts','Bin']].groupby('Bin', observed=False).sum()
@@ -1787,7 +1790,48 @@ def renameCol(x):
         raise NotImplementedError()
     return MeanValues
-
+def FAIL(msg):
+    HEADER    = '\033[95m'
+    RED       = '\033[91m'
+    ENDC      = '\033[0m'
+    BOLD      = '\033[1m'
+    UNDERLINE = '\033[4m'
+    print(RED+'[FAIL] ' + msg + ENDC)
+
+def WARN(msg):
+    ORAN = '\033[93m'
+    ENDC = '\033[0m'
+    print(ORAN+'[WARN] ' + msg + ENDC)
+
+def INFO(msg):
+    ENDC = '\033[0m'
+    print('[INFO] ' + msg + ENDC)
+
+def OK(msg):
+    GREEN = '\033[92m'
+    ENDC  = '\033[0m'
+    print(GREEN+'[ OK ] ' + msg + ENDC)
+
+class FileErrorLogger():
+    def __init__(self):
+        self.firstWarn=True
+        self.firstErr=True
+
+    def WARN(self, filename, msg):
+        if self.firstWarn:
+            baseDir = os.path.dirname(filename)
+            INFO('In directory: {}'.format(baseDir))
+            self.firstWarn = False
+        basename = os.path.basename(filename)
+        WARN('File {} {}'.format(basename, msg))
+
+    def FAIL(self, filename, msg):
+        if self.firstErr:
+            baseDir = os.path.dirname(filename)
+            INFO('In directory: {}'.format(baseDir))
+            self.firstErr = False
+        basename = os.path.basename(filename)
+        FAIL('File {} {}'.format(basename, msg))
 
 def averagePostPro(outFiles_or_DFs,avgMethod='periods',avgParam=None,
                ColMap=None,ColKeep=None,ColSort=None,stats=['mean'],
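# Hypothetical usage sketch of the FileErrorLogger added above (paths are
# illustrative only): the containing directory is announced once via INFO,
# after which each offending file is reported by basename.
log = FileErrorLogger()
log.WARN('/sims/case1/run01.outb', 'has 12 columns and not 15. Skipping.')
log.WARN('/sims/case1/run02.outb', 'has 12 columns and not 15. Skipping.')
# [INFO] In directory: /sims/case1
# [WARN] File run01.outb has 12 columns and not 15. Skipping.
# [WARN] File run02.outb has 12 columns and not 15. Skipping.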
@@ -1822,6 +1866,7 @@ def averagePostPro(outFiles_or_DFs,avgMethod='periods',avgParam=None,
         raise Exception('No outFiles or DFs provided')
 
     invalidFiles =[]
+    log = FileErrorLogger()
     # Loop through files and populate result
     for i,f in enumerate(outFiles_or_DFs):
         if isinstance(f, pd.DataFrame):
@@ -1833,26 +1878,35 @@ def averagePostPro(outFiles_or_DFs,avgMethod='periods',avgParam=None,
         except:
             invalidFiles.append(f)
             continue
-        postpro=averageDF(df, avgMethod=avgMethod, avgParam=avgParam, ColMap=ColMap, ColKeep=ColKeep,ColSort=ColSort,stats=stats, filename=f)
-        MeanValues=postpro # todo
+        df_avg = averageDF(df, avgMethod=avgMethod, avgParam=avgParam, ColMap=ColMap, ColKeep=ColKeep,ColSort=ColSort,stats=stats, filename=f)
+        MeanValues= df_avg.copy() # todo
         if result is None:
             # We create a dataframe here, now that we know the columns
             columns = MeanValues.columns
             result = pd.DataFrame(np.nan, index=np.arange(len(outFiles_or_DFs)), columns=columns)
         if MeanValues.shape[1]!=result.shape[1]:
-            columns_ref = result.columns
-            columns_loc = MeanValues.columns
+            columns_ref = set(result.columns)
+            columns_loc = set(MeanValues.columns)
             if skipIfWrongCol:
-                print('[WARN] File {} has {} columns and not {}. Skipping.'.format(f, MeanValues.shape[1], result.shape[1]))
+                log.WARN(f, 'has {} columns and not {}. Skipping.'.format(MeanValues.shape[1], result.shape[1]))
             else:
+                columns_com = list(columns_ref.intersection(columns_loc))
+                n_ref = len(columns_ref)
+                n_loc = len(columns_loc)
+                n_com = len(columns_com)
+                if n_com == 0:
+                    log.FAIL(f, 'has no columns in common with first file. Skipping.')
+                    continue
                 try:
-                    MeanValues=MeanValues[columns_ref]
-                    result.iloc[i,:] = MeanValues.copy().values
-                    print('[WARN] File {} has more columns than other files. Truncating.'.format(f, MeanValues.shape[1], result.shape[1]))
+                    result.loc[result.index[i], columns_com] = MeanValues[columns_com].iloc[0]
+                    log.WARN(f, 'has {} columns, first file has {} columns, with {} in common. Truncating.'.format(n_loc, n_ref, n_com))
                 except:
-                    print('[WARN] File {} is missing some columns compared to other files. Skipping.'.format(f))
+                    log.FAIL(f, 'has {} columns, first file has {} columns, with {} in common. Failed to assign common columns.'.format(n_loc, n_ref, n_com))
         else:
-            result.iloc[i,:] = MeanValues.copy().values
+            try:
+                result.iloc[i] = MeanValues.iloc[0]
+            except:
+                log.FAIL(f, 'could not be assigned to the result table.')
 
     if len(invalidFiles)==len(outFiles_or_DFs):
@@ -1904,12 +1958,11 @@ def integrateMomentTS(r, F):
         - M: array nt x nr of integrated moment at each radial station
     """
-    import scipy.integrate as si
     # Compute \int_{r_j}^{r_n} f(r) dr, with "j" each column
-    IF  = np.fliplr(-si.cumtrapz(np.fliplr(F), r[-1::-1]))
+    IF  = np.fliplr(-cumulative_trapezoid(np.fliplr(F), r[-1::-1]))
     # Compute \int_{r_j}^{r_n} f(r)*r dr, with "j" each column
     FR  = F * r
-    IFR = np.fliplr(-si.cumtrapz(np.fliplr(FR), r[-1::-1]))
+    IFR = np.fliplr(-cumulative_trapezoid(np.fliplr(FR), r[-1::-1]))
     # Compute x_j * \int_{r_j}^(r_n) f(r) * r dr
     R_IF = IF * r[:-1]
     # \int_{r_j}^(r_n) f(r) * (r-r_j) dr = IF + IFR
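# A quick sanity check for integrateMomentTS above (a sketch, assuming
# pydatview is importable and that the function returns the moment array M
# described in its docstring): for a uniform load f(r)=1 on [0, R], the
# bending moment at the root is R**2/2.
import numpy as np
from pydatview.fast.postpro import integrateMomentTS

r = np.linspace(0.0, 10.0, 101)   # radial stations, R = 10
F = np.ones((3, r.size))          # three "time steps" of uniform unit loading
M = integrateMomentTS(r, F)       # moments, one fewer station than r
print(M[0, 0])                    # expected close to 10.0**2/2 = 50.0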
diff --git a/pydatview/fast/subdyn.py b/pydatview/fast/subdyn.py
index 25ff4ff..7dfb98b 100644
--- a/pydatview/fast/subdyn.py
+++ b/pydatview/fast/subdyn.py
@@ -63,7 +63,7 @@ def __repr__(self):
 # --------------------------------------------------------------------------------}
 # --- Functions for general FEM model (jacket, flexible floaters)
 # --------------------------------------------------------------------------------{
-    def init(self, TP=(0,0,0), gravity = 9.81):
+    def init(self, TP=(0,0,0), gravity = 9.81, verbose=False):
         """
         Initialize SubDyn FEM model
 
@@ -89,22 +89,22 @@ def init(self, TP=(0,0,0), gravity = 9.81):
         #print('>>> graph\n',graph)
         #graph.toJSON('_GRAPH.json')
         # Convert to FEM model
-        with Timer('From graph'):
+        with Timer('From graph', silent=not verbose):
             FEM = femm.FEMModel.from_graph(self.graph, mainElementType=mainElementType, refPoint=TP, gravity=gravity)
         #model.toJSON('_MODEL.json')
-        with Timer('Assembly'):
+        with Timer('Assembly', silent=not verbose):
             FEM.assembly()
-        with Timer('Internal constraints'):
+        with Timer('Internal constraints', silent=not verbose):
             FEM.applyInternalConstraints()
             FEM.partition()
-        with Timer('BC'):
+        with Timer('BC', silent=not verbose):
             FEM.applyFixedBC()
-        with Timer('EIG'):
+        with Timer('EIG', silent=not verbose):
             Q, freq = FEM.eig(normQ='byMax')
-        with Timer('CB'):
+        with Timer('CB', silent=not verbose):
             FEM.CraigBampton(nModesCB = self.File['Nmodes'])
-        with Timer('Modes'):
+        with Timer('Modes', silent=not verbose):
             FEM.setModes(nModesFEM=30, nModesCB=self.File['Nmodes'])
 #         FEM.nodesDisp(Q)

diff --git a/pydatview/io/fast_input_file.py b/pydatview/io/fast_input_file.py
index 152c43e..664dc56 100644
--- a/pydatview/io/fast_input_file.py
+++ b/pydatview/io/fast_input_file.py
@@ -19,6 +19,7 @@ class BrokenFormatError(Exception): pass
 TABTYPE_NUM_SUBDYNOUT   = 7
 TABTYPE_MIX_WITH_HEADER = 6
 TABTYPE_FIL             = 3
+TABTYPE_OUTLIST         = 33
 TABTYPE_FMT             = 9999 # TODO

@@ -140,10 +141,10 @@ def __iter__(self):
     def __next__(self):
         return self.fixedfile.__next__()
 
-    def __setitem__(self,key,item):
-        return self.fixedfile.__setitem__(key,item)
+    def __setitem__(self, key, item):
+        return self.fixedfile.__setitem__(key, item)
 
-    def __getitem__(self,key):
+    def __getitem__(self, key):
         return self.fixedfile.__getitem__(key)
 
     def __repr__(self):
@@ -151,6 +152,56 @@ def __repr__(self):
         #s ='Fast input file: {}\n'.format(self.filename)
         #return s+'\n'.join(['{:15s}: {}'.format(d['label'],d['value']) for i,d in enumerate(self.data)])
 
+    def delete(self, key, error=False):
+        self.pop(key, error=error)
+
+    def pop(self, key, error=False):
+        if isinstance(key, int):
+            i = key
+        else:
+            i = self.fixedfile.getIDSafe(key)
+        if i>=0:
+            d = self.data[i]
+            del self.data[i]
+            return d
+        else:
+            if error:
+                raise Exception('Key `{}` not found in file:{}'.format(key, self.filename))
+            else:
+                print('[WARN] Key `{}` not found in file:{}'.format(key, self.filename))
+            return None
+
+    def insertComment(self, i, comment='', error=False):
+        d = getDict()
+        d['value']     = comment
+        d['label']     = ''
+        d['descr']     = ''
+        d['isComment'] = True
+        self.data.insert(i, d)
+
+    def insertKeyVal(self, i, key, value, description='', error=False):
+        d = getDict()
+        d['value']     = value
+        d['label']     = key
+        d['descr']     = description
+        d['isComment'] = False
+        self.data.insert(i, d)
+
+    def insertKeyValAfter(self, key_prev, key, value, description, error=False):
+        i = self.fixedfile.getIDSafe(key_prev)
+        if i<0:
+            if error:
+                raise Exception('Key `{}` not found in file:{}'.format(key_prev, self.filename))
+            else:
+                print('[WARN] Key `{}` not found in file:{}'.format(key_prev, self.filename))
+        self.insertKeyVal(i+1, key, value, description, error=error)
+
+
+
 
 # --------------------------------------------------------------------------------}
 # --- BASE INPUT FILE
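# A hypothetical editing session with the new helpers above (file name and
# keys are illustrative only; the sketch assumes the FASTInputFile wrapper):
from pydatview.io.fast_input_file import FASTInputFile

f = FASTInputFile('ElastoDyn.dat')                 # hypothetical input file
old = f.pop('TwFADmp(1)', error=False)             # remove an entry, warn if absent
f.insertKeyValAfter('RotSpeed', 'MyNewKey', 12.1, 'hypothetical new entry')
f.insertComment(0, '--- edited programmatically ---')
f.write('ElastoDyn_mod.dat')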
@@ -185,12 +236,12 @@ def defaultExtensions():
     def formatName():
         return 'FAST input file Base'
 
-    def __init__(self, filename=None, **kwargs):
+    def __init__(self, filename=None, IComment=None, **kwargs):
         self._size=None
         self.setData() # Init data
         if filename:
             self.filename = filename
-            self.read()
+            self.read(IComment=IComment)
 
     def setData(self, filename=None, data=None, hasNodal=False, module=None):
         """ Set the data of this object. This object shouldn't store anything else. """
@@ -261,7 +312,7 @@ def __setitem__(self, key, item):
                 pass
         self.data[i]['value'] = item
 
-    def __getitem__(self,key):
+    def __getitem__(self, key):
         i = self.getID(key)
         return self.data[i]['value']
@@ -322,7 +373,7 @@ def _IComment(self): return [1] # Typical OpenFAST files have comment on second line [1]
 
-    def read(self, filename=None):
+    def read(self, filename=None, IComment=None):
         if filename:
             self.filename = filename
         if self.filename:
@@ -330,11 +381,13 @@
                 raise OSError(2,'File not found:',self.filename)
             if os.stat(self.filename).st_size == 0:
                 raise EmptyFileError('File is empty:',self.filename)
-            self._read()
+            self._read(IComment=IComment)
         else:
             raise Exception('No filename provided')
 
-    def _read(self):
+    def _read(self, IComment=None):
+        if IComment is None:
+            IComment=[]
         # --- Tables that can be detected based on the "Value" (first entry on line)
         # TODO members for BeamDyn with multiple key point
         ####### TODO PropSetID is Duplicate SubDyn and used in HydroDyn
@@ -425,6 +478,11 @@ def _read(self):
             labOffset=''
             while i<len(lines):
+                # Lines flagged by the caller through IComment are stored as comments
+                if i in IComment:
+                    self.addComment(lines[i])
+                    i+=1
+                    continue
                 line = lines[i]
                 if line.upper().find('OUTLIST')>0 \
@@ -445,19 +503,19 @@ def _read(self):
                 else:
                     d['label']   = firstword
                     d['descr']   = remainer
                     d['tabType'] = TABTYPE_FIL # TODO
                     d['value']   = ['']+OutList
                 self.data.append(d)
                 if i>=len(lines):
+                    self.addComment('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)')
+                    self.addComment('---------------------------------------------------------------------------------------')
                     break
                 # --- Here we cheat and force an exit of the input file
                 # The reason for this is that some files have a lot of things after the END, which will result in the file being interpreted as a wrong format due to too many comments
                 if i+2<len(lines) and (lines[i+1].lower().find('bldnd_bloutnd')>0 or lines[i+2].lower().find('bldnd_bloutnd')>0):
                     self.hasNodal=True
                 else:
-                    self.data.append(parseFASTInputLine('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)',i+1))
-                    self.data.append(parseFASTInputLine('---------------------------------------------------------------------------------------',i+2))
-                    break
+                    pass
             elif line.upper().find('SSOUTLIST' )>0 or line.upper().find('SDOUTLIST' )>0:
                 # SUBDYN Outlist does not follow regular format
                 self.data.append(parseFASTInputLine(line,i))
@@ -470,8 +528,8 @@ def _read(self):
                     d['value']=o
                     self.data.append(d)
                 # --- Here we cheat and force an exit of the input file
-                self.data.append(parseFASTInputLine('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)',i+1))
-                self.data.append(parseFASTInputLine('---------------------------------------------------------------------------------------',i+2))
+                self.addComment('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)')
+                self.addComment('---------------------------------------------------------------------------------------')
                 break
             elif line.upper().find('ADDITIONAL STIFFNESS')>0:
                 # TODO, lazy implementation so far, MAKE SUB FUNCTION
@@ -493,18 +551,33 @@ def _read(self):
                 i+=1;
                 self.readBeamDynProps(lines,i)
                 return
+            elif line.upper().find('GLBDCM')>0:
+                # BeamDyn DCM has no label.....
+                self.addComment(lines[i]); i+=1
+                self.addComment(lines[i]); i+=1
+                d = getDict()
+                d['label']   = 'GlbDCM'
+                d['tabType'] = TABTYPE_NUM_NO_HEADER
+                nTabLines = 3
+                nHeaders  = 0
+                d['value'], d['tabColumnNames'], d['tabUnits'] = parseFASTNumTable(self.filename, lines[i:i+nTabLines],nTabLines, i, nHeaders, tableType='num', nOffset=0)
+                i=i+3
+                self.data.append(d)
+                continue
+
+                #---The following 3 by 3 matrix is the direction cosine matrix, GlbDCM(3,3),
             elif line.upper().find('OUTPUTS')>0:
                 if 'Points' in self.keys() and 'dtM' in self.keys():
                     OutList,i = parseFASTOutList(lines,i+1)
                     d = getDict()
                     d['label']   = 'Outlist'
                     d['descr']   = ''
-                    d['tabType'] = TABTYPE_FIL # TODO
+                    d['tabType'] = TABTYPE_OUTLIST
                     d['value']   = OutList
                     self.addComment('------------------------ OUTPUTS --------------------------------------------')
                     self.data.append(d)
-                    self.addComment('END')
-                    self.addComment('------------------------- need this line --------------------------------------')
+                    self.addComment('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)')
+                    self.addComment('---------------------------------------------------------------------------------------')
                     return
 
             # --- Parsing of standard lines: value(s) key comment
@@ -555,6 +628,7 @@ def _read(self):
                     d['tabType'] = TABTYPE_NUM_WITH_HEADERCOM
                     nTabLines = self[d['tabDimVar']]-1 # SOMEHOW ONE DATA POINT LESS
                     d['value'], d['tabColumnNames'],_ = parseFASTNumTable(self.filename,lines[i:i+nTabLines+1],nTabLines,i,1)
+                    d['descr'] = ''
                     # d['tabUnits'] = ['(-)','(-)']
                     self.data.append(d)
                     break
@@ -597,6 +671,7 @@ def _read(self):
                         nTabLines = self[d['tabDimVar']]
                     #print('Reading table {} Dimension {} (based on {})'.format(d['label'],nTabLines,d['tabDimVar']));
                     d['value'], d['tabColumnNames'], d['tabUnits'] = parseFASTNumTable(self.filename,lines[i:i+nTabLines+nHeaders], nTabLines, i, nHeaders, tableType=tab_type, varNumLines=d['tabDimVar'])
+                    _, d['descr'] = splitAfterChar(lines[i], '!')
                     i += nTabLines+nHeaders-1
 
                     # --- Temporary hack for e.g. SubDyn, that has duplicate table, impossible to detect in the current way...
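# The GlbDCM block handled above is a header-less 3x3 table; a minimal
# standalone sketch of what parsing such a block amounts to (this is not the
# patch's parser, just an illustration of the table shape):
import numpy as np

lines = ['1.0 0.0 0.0',
         '0.0 1.0 0.0',
         '0.0 0.0 1.0']
dcm = np.array([[float(v) for v in l.split()] for l in lines])
assert dcm.shape == (3, 3)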
@@ -663,6 +738,7 @@ def _read(self):
                     d['label'] += labOffset
                     #print('Reading table {} Dimension {} (based on {})'.format(d['label'],nTabLines,d['tabDimVar']));
                     d['value'], d['tabColumnNames'], d['tabUnits'] = parseFASTNumTable(self.filename,lines[i:i+nTabLines+nHeaders+nOffset],nTabLines,i, nHeaders, tableType=tab_type, nOffset=nOffset, varNumLines=d['tabDimVar'])
+                    d['descr'] = ''
                     # i += nTabLines+1-nOffset
 
                     # --- Temporary hack for e.g. SubDyn, that has duplicate table, impossible to detect in the current way...
@@ -734,10 +810,11 @@ def toString(self):
         def toStringVLD(val,lab,descr):
             val='{}'.format(val)
             lab='{}'.format(lab)
-            if len(val)<13:
-                val='{:13s}'.format(val)
-            if len(lab)<13:
-                lab='{:13s}'.format(lab)
+            # Trying to reproduce WISDEM format
+            if len(val)<22:
+                val='{:22s}'.format(val)
+            if len(lab)<11:
+                lab='{:11s}'.format(lab)
             return val+' '+lab+' - '+descr.strip().lstrip('-').lstrip()
 
         def toStringIntFloatStr(x):
@@ -764,8 +841,6 @@ def mat_tostring(M,fmt='24.16e'):
                 s+='\n'
             s+='\n'
         return s
-
-
         for i in range(len(self.data)):
             d=self.data[i]
             if d['isComment']:
@@ -777,32 +852,42 @@ def mat_tostring(M,fmt='24.16e'):
             else:
                 s+=toStringVLD(d['value'],d['label'],d['descr'])
         elif d['tabType']==TABTYPE_NUM_WITH_HEADER:
+            PrettyCols= d['label']!='TowProp' # Temporary hack for AeroDyn tower table
             if d['tabColumnNames'] is not None:
-                s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']]))
-                #s+=d['descr'] # Not ready for that
+                if PrettyCols:
+                    s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabColumnNames']]))
+                else:
+                    s+='{}'.format(' '.join(['{:14s}'.format(s) for s in d['tabColumnNames']]))
+                if len(d['descr'])>0 and d['descr'][0]=='!':
+                    s+=d['descr']
                 if d['tabUnits'] is not None:
                     s+='\n'
-                    s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']]))
+                    if PrettyCols:
+                        s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']]))
+                    else:
+                        s+='{}'.format(' '.join(['{:14s}'.format(s) for s in d['tabUnits']]))
                 newline='\n'
             else:
                 newline=''
            if np.size(d['value'],0) > 0 :
                 s+=newline
-                s+='\n'.join('\t'.join( ('{:15.0f}'.format(x) if int(x)==x else '{:15.8e}'.format(x) ) for x in y) for y in d['value'])
+                if PrettyCols:
+                    s+='\n'.join('\t'.join( ('{:^15.0f}'.format(x) if int(x)==x else '{:15.8e}'.format(x) ) for x in y) for y in d['value'])
+                else:
+                    s+='\n'.join(' '.join( ('{:13.7E}'.format(x) ) for x in y) for y in d['value'])
         elif d['tabType']==TABTYPE_MIX_WITH_HEADER:
             s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']]))
             if d['tabUnits'] is not None:
                 s+='\n'
-                s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']]))
+                s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']]))
             if np.size(d['value'],0) > 0 :
                 s+='\n'
                 s+='\n'.join('\t'.join(toStringIntFloatStr(x) for x in y) for y in d['value'])
         elif d['tabType']==TABTYPE_NUM_WITH_HEADERCOM:
-            s+='! {}\n'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']]))
-            s+='! {}\n'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']]))
+            s+='! {}\n'.format(' '.join(['{:^15s}'.format(s) for s in d['tabColumnNames']]))
+            s+='! {}\n'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']]))
             s+='\n'.join('\t'.join('{:15.8e}'.format(x) for x in y) for y in d['value'])
         elif d['tabType']==TABTYPE_FIL:
-            #f.write('{} {} {}\n'.format(d['value'][0],d['tabDetect'],d['descr']))
             label = d['label']
             if 'kbot' in self.keys(): # Moordyn has no 'OutList' label..
                 label=''
@@ -811,6 +896,10 @@ def mat_tostring(M,fmt='24.16e'):
             else:
                 s+='{} {} {}\n'.format(d['value'][0], label, d['descr']) # TODO?
             s+='\n'.join(fil for fil in d['value'][1:])
+        elif d['tabType']==TABTYPE_OUTLIST:
+            label = d['label']
+            s+='{:22s} {:11s} - {}\n'.format('', label, d['descr'])
+            s+='\n'.join(fil for fil in d['value'][1:])
         elif d['tabType']==TABTYPE_NUM_BEAMDYN:
             # TODO use dedicated sub-class
             data = d['value']
@@ -824,11 +913,14 @@ def mat_tostring(M,fmt='24.16e'):
                 s += beamdyn_section_mat_tostring(x,K,M)
         elif d['tabType']==TABTYPE_NUM_SUBDYNOUT:
             data = d['value']
-            s+='{}\n'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']]))
-            s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']]))
+            s+='{}\n'.format(' '.join(['{:^15s}'.format(s) for s in d['tabColumnNames']]))
+            s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']]))
             if np.size(d['value'],0) > 0 :
                 s+='\n'
                 s+='\n'.join('\t'.join('{:15.0f}'.format(x) for x in y) for y in data)
+        elif d['tabType']==TABTYPE_NUM_NO_HEADER:
+            data = d['value']
+            s+='\n'.join('\t'.join( ('{:15.0f}'.format(x) if int(x)==x else '{:15.8e}'.format(x) ) for x in y) for y in d['value'])
         else:
             raise Exception('Unknown table type for variable {}'.format(d))
         if i<len(self.data)-1:
             s+='\n'
     return s
 
 def cleanAfterChar(l, c):
     n = l.find(c);
     if n>0:
         return l[:n]
     else:
         return l
+def splitAfterChar(l, c):
+    n = l.find(c);
+    if n>0:
+        return l[:n], l[n:]
+    else:
+        return l, ''
 
 def getDict():
     return {'value':None, 'label':'', 'isComment':False, 'descr':'', 'tabType':TABTYPE_NOT_A_TAB}
@@ -1076,6 +1174,12 @@ def _merge_value(splits):
 
 def parseFASTInputLine(line_raw,i,allowSpaceSeparatedList=False):
     d = getDict()
+    line_low = line_raw.lower()
+    if line_low=='end' or line_low.startswith('end of') or line_low.startswith('end ('):
+        d['isComment'] = True
+        d['value']     = line_raw
+        d['label']     = 'END'
+        return d
     #print(line_raw)
     try:
         # preliminary cleaning (Note: loss of formatting)
@@ -1188,7 +1292,7 @@ def parseFASTOutList(lines,iStart):
         if i>=len(lines):
             print('[WARN] End of file reached while reading Outlist')
     #i=min(i+1,len(lines))
-    return OutList,iStart+len(OutList)
+    return OutList, iStart+len(OutList)
 
 
 def extractWithinParenthesis(s):
@@ -1241,6 +1345,7 @@ def parseFASTNumTable(filename,lines,n,iStart,nHeaders=2,tableType='num',nOffset
 
     Units = None
 
+    i = 0
     if len(lines)!=n+nHeaders+nOffset:
         raise BrokenFormatError('Not enough lines in table: {} lines instead of {}\nFile:{}'.format(len(lines)-nHeaders,n,filename))
     try:
@@ -2029,7 +2134,7 @@ def __init__(self, filename=None, **kwargs):
         self.module='ExtPtfm'
 
-    def _read(self):
+    def _read(self, IComment=None):
         with open(self.filename, 'r', errors="surrogateescape") as f:
             lines=f.read().splitlines()
         detectAndReadExtPtfmSE(self, lines)

diff --git a/pydatview/io/fast_input_file_graph.py b/pydatview/io/fast_input_file_graph.py
index b4a558c..8d4f451 100644
--- a/pydatview/io/fast_input_file_graph.py
+++ b/pydatview/io/fast_input_file_graph.py
@@ -134,7 +134,7 @@ def subdynToGraph(sd, propToNodes=False, propToElem=False):
 # --------------------------------------------------------------------------------}
 # --- HydroDyn
 # --------------------------------------------------------------------------------{
-def hydrodynToGraph(hd, propToNodes=False, propToElem=False):
+def hydrodynToGraph(hd, propToNodes=False, propToElem=False, verbose=False):
     """
     hd: dict-like object as returned by weio
 
@@ -195,7 +195,8 @@ def type2Color(Pot):
     if 'FillGroups' in hd.keys():
         # Filled members
         Graph.addMiscPropertySet('FillGroups')
-        print('>>> TODO Filled Groups')
+        if verbose:
+            print('>>> TODO Filled Groups')
         #for ip,P in enumerate(hd['FillGroups']):
         #    # FillNumM FillMList FillFSLoc FillDens
         #    raise NotImplementedError('hydroDynToGraph, Fill List might not be properly set, verify below')

diff --git a/pydatview/io/fast_output_file.py b/pydatview/io/fast_output_file.py
index 3713fe2..8748142 100644
--- a/pydatview/io/fast_output_file.py
+++ b/pydatview/io/fast_output_file.py
@@ -150,7 +150,8 @@ def readline(iLine):
             cols=[n+'_['+u.replace('sec','s')+']' for n,u in zip(info['attribute_names'], info['attribute_units'])]
         else:
             cols=info['attribute_names']
-
+        self.description = info.get('description', '')
+        self.description = ''.join(self.description) if isinstance(self.description,list) else self.description
         if isinstance(self.data, pd.DataFrame):
             self.data.columns = cols
         else:
@@ -174,10 +175,15 @@ def write(self, filename=None, binary=None, fileID=4):
         else:
             # ascii output
             with open(self.filename,'w') as f:
+                f.write(self.description) # add description to the beginning of the file
                 f.write('\t'.join(['{:>10s}'.format(c) for c in self.channels])+'\n')
                 f.write('\t'.join(['{:>10s}'.format('('+u+')') for u in self.units])+'\n')
                 # TODO better..
-                f.write('\n'.join(['\t'.join(['{:10.4f}'.format(y[0])]+['{:10.3e}'.format(x) for x in y[1:]]) for y in self.data]))
+                if self.data is not None:
+                    if isinstance(self.data, pd.DataFrame) and not self.data.empty:
+                        f.write('\n'.join(['\t'.join(['{:10.4f}'.format(y.iloc[0])]+['{: .5e}'.format(x) for x in y.iloc[1:]]) for _, y in self.data.iterrows()]))
+                    else: # in case data is an array or a list of lists
+                        f.write('\n'.join(['\t'.join(['{:10.4f}'.format(y[0])]+['{: .5e}'.format(x) for x in y[1:]]) for y in self.data]))

@@ -231,7 +237,7 @@ def driverFile(self):
         except:
             return None
 
-    def to2DFields(self, DeltaAzi=5, nPeriods=3, rcoords=None, **kwargs):
+    def to2DFields(self, DeltaAzi=5, nPeriods=3, rcoords=None, kinds=['(t,r)','(psi,r)'], **kwargs):
         import pydatview.fast.postpro as fastlib
 
         def insertName(ds, name, dims):
@@ -252,43 +258,61 @@ def insertName(ds, name, dims):
         # Sanity check
         DeltaAzi=float(DeltaAzi)
         df = self.toDataFrame()
-        # --- Time wise
-        ds_AD, ds_ED, ds_BD = fastlib.spanwisePostProRows(df, driverFile, si1='t', sir='r')
-        if ds_AD is None:
-            return None # No Hope
-        if rcoords is not None:
-            runit = 'm'
-        else:
-            if 'r_[m]' in ds_AD.keys():
-                rcoords =ds_AD['r_[m]'].values
-                runit = 'm'
-            else:
-                rcoords =ds_AD['i/n_[-]'].values
-                runit = '-'
+        ds_AD1 = None
+        ds_AD2 = None
+        # --- Radial coordinate
+        def get_rvals(ds, rcoords):
+            n = len(ds['i/n_[-]'].values)
+            if rcoords is not None: # priority to user input
+                runit = 'm'
+                rvals = rcoords
+                if len(rcoords)!=n:
+                    raise Exception('Length of rcoords should be: {}'.format(n))
+            elif 'r_[m]' in ds.keys():
+                rvals = ds['r_[m]'].values
+                runit = 'm'
+            else:
+                rvals = ds['i/n_[-]'].values
+                runit = '-'
+            return rvals, runit
-        # Rename columns to make them unique
-        ds_AD = insertName(ds_AD, '(t,r)', ('t','r'))
-        try:
-            ds_AD.coords['t'] = ('t', df['Time_[s]'].values)
-        except:
-            pass
-        ds_AD.coords['r'] = ('r', rcoords)
+        # --- Time wise
+        if '(t,r)' in kinds:
+            ds_AD1, ds_ED, ds_BD = fastlib.spanwisePostProRows(df, driverFile, si1='t', sir='r')
+            if ds_AD1 is None:
+                return None # No Hope
+            rvals, runit = get_rvals(ds_AD1, rcoords)
+            # Rename columns to make them unique
+            ds_AD1 = insertName(ds_AD1, '(t,r)', ('t','r'))
+            try:
+                ds_AD1.coords['t'] = ('t', df['Time_[s]'].values)
+            except:
+                pass
+            ds_AD1.coords['r'] = ('r', rvals)
+            ds_AD1.r.attrs['unit'] = runit
+            ds_AD1.t.attrs['unit'] = 's'
         # --- Azimuthal Radial postpro
-        psi = np.arange(0, 360+DeltaAzi/10, DeltaAzi)
-        dfPsi = fastlib.azimuthal_average_DF(df, psiBin=psi, periodic=True, nPeriods=nPeriods) #, tStart = time[-1]-20)
-        ds_AD2, ds_ED2, ds_BD2 = fastlib.spanwisePostProRows(dfPsi, driverFile, si1='psi', sir='r')
-        ds_AD2.coords['psi'] = ('psi', psi) # TODO hack from bin to bin edges...
-        ds_AD2.coords['r'] = ('r', rcoords)
-        # Rename columns to make them unique
-        ds_AD2= insertName(ds_AD2, '(psi,r)', ('psi','r'))
+        if '(psi,r)' in kinds:
+            psi = np.arange(0, 360+DeltaAzi/10, DeltaAzi)
+            dfPsi = fastlib.azimuthal_average_DF(df, psiBin=psi, periodic=True, nPeriods=nPeriods) #, tStart = time[-1]-20)
+            ds_AD2, ds_ED2, ds_BD2 = fastlib.spanwisePostProRows(dfPsi, driverFile, si1='psi', sir='r')
+            rvals, runit = get_rvals(ds_AD2, rcoords)
+            ds_AD2.coords['psi'] = ('psi', psi) # TODO hack from bin to bin edges...
+            ds_AD2.coords['r'] = ('r', rvals)
+            # Rename columns to make them unique
+            ds_AD2= insertName(ds_AD2, '(psi,r)', ('psi','r'))
+            ds_AD2.psi.attrs['unit'] = 'deg'
+            ds_AD2.r.attrs['unit'] = runit
 
         # --- Combine into one field (need unique variables and dimension)
-        ds_AD = ds_AD.merge(ds_AD2, compat='override')
-        ds_AD.r.attrs['unit'] = runit
-        ds_AD.t.attrs['unit'] = 's'
-        ds_AD.psi.attrs['unit'] = 'deg'
-        ds_AD.r.attrs['unit'] = runit
+        if ds_AD1 is not None and ds_AD2 is not None:
+            ds_AD = ds_AD1.merge(ds_AD2, compat='override')
+        elif ds_AD1 is not None:
+            ds_AD = ds_AD1
+        else:
+            ds_AD = ds_AD2
+
+        # ---
 #         # ds_AD = ds_AD.swap_dims(dims_dict={'it': 't'})
 #         # ds_AD = ds_AD.drop_vars('it')
 #         # ds_AD.coords['r'] = ('ir', rcoords)
@@ -654,7 +678,7 @@ def writeBinary(fileName, channels, chanNames, chanUnits, fileID=4, descStr=''):
         ranges[ranges==0]=1  # range set to 1 for constant channel. In OpenFAST: /sqrt(epsilon(1.0_SiKi))
         ColScl = np.single(int16Rng/ranges)
         ColOff = np.single(int16Min - np.single(mins)*ColScl)
-
+        
         #Just available for fileID
         if fileID not in [2,4]:
             print("current version just works with fileID = 2 or 4")

diff --git a/pydatview/io/hawc2_dat_file.py b/pydatview/io/hawc2_dat_file.py
index 7ee37f6..4a6ba4a 100644
--- a/pydatview/io/hawc2_dat_file.py
+++ b/pydatview/io/hawc2_dat_file.py
@@ -103,7 +103,7 @@ def _write(self):
             nChannels = self.data.shape[1]
             SimTime = self.data[-1,0] #-self.data[0,0]
             # --- dat file
-            np.savetxt(datfilename, self.data, fmt=b'%16.8e')
+            np.savetxt(datfilename, self.data, fmt='%16.8e')
             # --- Sel file
             with open(selfilename, 'w') as f:
                 if self.bHawc:

diff --git a/pydatview/io/mini_yaml.py b/pydatview/io/mini_yaml.py
index 0650c0e..db10ee5 100644
--- a/pydatview/io/mini_yaml.py
+++ b/pydatview/io/mini_yaml.py
@@ -54,14 +54,50 @@ def yaml_read(filename=None, dictIn=None, lines=None, text=None):
                     d[key]=float(val)
                 except:
                     d[key]=val.strip() # string
+            elif len(sp)>=2 and sp[1].strip()[0]=='{':
+                key = sp[0]
+                i1=l.find('{')
+                i2=l.find('}')
+                LL = l[i1:i2+1]
+                D=_str2dict(LL)
+                d[key] = D
+            elif len(sp)>=2:
+                key   = sp[0]
+                value = ':'.join(sp[1:])
+                d[key] = value
+                print('mini_yaml: Setting {}={}'.format(key, value))
             else:
-                raise Exception('Line {:d} has colon, number of splits is {}, which is not supported'.format(len(sp)))
+                raise Exception('Line {:d} has colon, number of splits is {}, which is not supported'.format(i,len(sp)))
     return d
 
 def _cleanComment(l):
     """ remove comments from a line"""
     return l.split('#')[0].strip()
 
+def _str2dict(string):
+    string = string.strip('{}')
+    pairs  = string.split(', ')
+    d={}
+    for pair in pairs:
+        sp    = pair.split(':')
+        key   = sp[0].strip()
+        value = sp[1].strip()
+        try:
+            d[key] = int(value)
+            continue
+        except ValueError:
+            pass
+        try:
+            d[key] = float(value)
+            continue
+        except ValueError:
+            pass
+        d[key] = value
+    return d
+
 def _readDashList(iStart, lines):
     """ """
     i=iStart
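# A standalone demo of what the inline-dictionary support above parses (the
# helper name below is hypothetical; the real _str2dict behaves the same):
# values are tried as int, then float, and kept as strings otherwise.
def _str2dict_demo(string):
    d = {}
    for pair in string.strip('{}').split(', '):
        key, value = [s.strip() for s in pair.split(':')]
        for cast in (int, float):
            try:
                d[key] = cast(value)
                break
            except ValueError:
                continue
        else:
            d[key] = value
    return d

print(_str2dict_demo('{n: 3, tol: 0.01, name: case1}'))
# {'n': 3, 'tol': 0.01, 'name': 'case1'}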
diff --git a/pydatview/io/rosco_performance_file.py b/pydatview/io/rosco_performance_file.py
index 7666c4e..8c2f873 100644
--- a/pydatview/io/rosco_performance_file.py
+++ b/pydatview/io/rosco_performance_file.py
@@ -104,7 +104,7 @@ def write(self, filename=None):
         # Write
         write_rotor_performance(self.filename, self['pitch'], self['TSR'], self['CP'],self['CT'], self['CQ'], self['WS'], TurbineName=self.name)
 
-    def checkConsistency(self):
+    def checkConsistency(self, verbose=False):
         """
         Check that data makes sense.
         in particular, check if CP=lambda CQ
@@ -119,10 +119,12 @@
         TSR = np.tile(tsr.flatten(), (len(self['pitch']),1)).T
         if CQ is None and CP is not None:
             CQ = CP/TSR
-            print('[INFO] Computing CQ from CP')
+            if verbose:
+                print('[INFO] Computing CQ from CP')
         elif CQ is not None and CP is None:
             CP = CQ*TSR
-            print('[INFO] Computing CP from CQ')
+            if verbose:
+                print('[INFO] Computing CP from CQ')
         elif CQ is not None and CP is not None:
             pass
         else:
@@ -419,7 +421,7 @@
     file.close()
 
 
-def interp2d_pairs(*args,**kwargs):
+def interp2d_pairs(X,Y,Z,**kwargs):
     """ Same interface as interp2d but the returned interpolant will evaluate its inputs as pairs of values.
     Inputs can therefore be arrays
     author: E. Branlard
     """
     import scipy.interpolate as si
+    # Wrapping the scipy interp2 function to call our interpolant instead
+    # --- OLD
     # Internal function, that evaluates pairs of values, output has the same shape as input
-    def interpolant(x,y,f):
-        x,y = np.asarray(x), np.asarray(y)
-        return (si.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], x.ravel(), y.ravel())[0]).reshape(x.shape)
-    # Wrapping the scipy interp2 function to call our interpolant instead
-    return lambda x,y: interpolant(x,y,si.interp2d(*args,**kwargs))
+    #def interpolant(x,y,f):
+    #    x,y = np.asarray(x), np.asarray(y)
+    #    return (si.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], x.ravel(), y.ravel())[0]).reshape(x.shape)
+    #return lambda x,y: interpolant(x,y,si.interp2d(*args,**kwargs))
+    # --- NEW
+    Finterp = si.RegularGridInterpolator((X,Y), Z.T, **kwargs)
+    #r = si.RectBivariateSpline(X, Y, Z.T)
+    def interpolant(x,y):
+        x,y = np.asarray(x), np.asarray(y)
+        return Finterp((x,y))
+    return interpolant
 
 
 if __name__ == '__main__':
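# Usage sketch of the RegularGridInterpolator-based replacement above
# (scipy.interpolate.interp2d was deprecated in SciPy 1.10 and removed in
# 1.14). The returned callable evaluates pairs, so inputs of a common shape
# map to outputs of that shape. Grid values below are illustrative.
import numpy as np
from scipy.interpolate import RegularGridInterpolator

X = np.linspace(0, 10, 11)   # e.g. pitch values
Y = np.linspace(0, 15, 16)   # e.g. TSR values
Z = np.add.outer(Y, X)       # Z[j, i] = X[i] + Y[j], shape (len(Y), len(X))
F = RegularGridInterpolator((X, Y), Z.T)  # note the transpose, as in the patch
print(F((2.5, 3.5)))         # -> 6.0 (linear interpolation by default)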
diff --git a/pydatview/tools/colors.py b/pydatview/tools/colors.py
index 1e5966e..14b2b50 100644
--- a/pydatview/tools/colors.py
+++ b/pydatview/tools/colors.py
@@ -1,4 +1,5 @@
 import numpy as np
+import matplotlib
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import matplotlib.colors as mcolors
@@ -10,6 +11,12 @@
 # --------------------------------------------------------------------------------}
 # --- COLOR TOOLS
 # --------------------------------------------------------------------------------{
+def rgb(r,g,b):
+    if (r+b+g)<=3:
+        return np.array([r,g,b])
+    else:
+        return np.array([r/255.,g/255.,b/255.])
+
 def standardize(col, rgb_int=False):
     """ given a color return a rgb tuple between 0 and 1 """
     if rgb_int:
@@ -132,6 +139,19 @@ def make_colormap(seq,values=None,name='CustomMap'):
         print(cdict)
     return mcolors.LinearSegmentedColormap(name, cdict)
 
+def cmap_colors(n, name='viridis'):
+    try:
+        cmap  = matplotlib.colormaps[name]
+        COLRS = [(cmap(v)[0],cmap(v)[1],cmap(v)[2]) for v in np.linspace(0,1,n)]
+    except:
+#        try:
+#            import matplotlib.cm
+#            cmap = matplotlib.cm.get_cmap('viridis')
+#            COLRS = [(cmap(v)[0],cmap(v)[1],cmap(v)[2]) for v in np.linspace(0,1,n)]
+#        except:
+        print('[WARN] colors.py: cmap_colors failed.')
+        COLRS = color_scales(n, color='blue')
+    return COLRS
 
 def color_scales(n, color='blue'):
     maps={
@@ -153,46 +173,46 @@ def color_scales(n, color='blue'):
 
 # --- Diverging Brown Green
-DV_BG=[np.array([140,81,10])/255., np.array([191,129,4])/255., np.array([223,194,1])/255., np.array([246,232,1])/255., np.array([245,245,2])/255., np.array([199,234,2])/255., np.array([128,205,1])/255., np.array([53,151,14])/255., np.array([1,102,94 ])/255.]
+DV_BG=[rgb(140,81,10), rgb(191,129,4), rgb(223,194,1), rgb(246,232,1), rgb(245,245,2), rgb(199,234,2), rgb(128,205,1), rgb(53,151,14), rgb(1,102,94)]
 # --- Diverging Red Blue
-DV_RB=[ np.array([215,48,39])/255., np.array([244,109,67])/255., np.array([253,174,97])/255., np.array([254,224,144])/255., np.array([255,255,191])/255., np.array([224,243,248])/255., np.array([171,217,233])/255., np.array([116,173,209])/255., np.array([69,117,180])/255.]
-
+DV_RB=[ rgb(215,48,39), rgb(244,109,67), rgb(253,174,97), rgb(254,224,144), rgb(255,255,191), rgb(224,243,248), rgb(171,217,233), rgb(116,173,209), rgb(69,117,180) ]
+
 # --- Diverging Purple Green
-DV_PG=[np.array([118,42,131])/255., np.array([153,112,171])/255., np.array([194,165,207])/255., np.array([231,212,232])/255., np.array([247,247,247])/255., np.array([217,240,211])/255., np.array([166,219,160])/255., np.array([90,174,97])/255., np.array([27,120,55])/255.]
+DV_PG=[rgb(118,42,131), rgb(153,112,171), rgb(194,165,207), rgb(231,212,232), rgb(247,247,247), rgb(217,240,211), rgb(166,219,160), rgb(90,174,97), rgb(27,120,55) ]
 
 # Maureen Stone, for line plots
-MW_Light_Blue    = np.array([114,147,203])/255.
-MW_Light_Orange  = np.array([225,151,76])/255.
-MW_Light_Green   = np.array([132,186,91])/255.
-MW_Light_Red     = np.array([211,94,96])/255.
-MW_Light_Gray    = np.array([128,133,133])/255.
-MW_Light_Purple  = np.array([144,103,167])/255.
-MW_Light_DarkRed = np.array([171,104,87])/255.
-MW_Light_Kaki    = np.array([204,194,16])/255.
-
-MW_Blue    = np.array([57,106,177])/255.
-MW_Orange  = np.array([218,124,48])/255.
-MW_Green   = np.array([62,150,81])/255.
-MW_Red     = np.array([204,37,41])/255.
-MW_Gray    = np.array([83,81,84])/255.
-MW_Purple  = np.array([107,76,154])/255.
-MW_DarkRed = np.array([146,36,40])/255.
-MW_Kaki    = np.array([148,139,61])/255.
-
-MathematicaBlue       = np.array([63 ,63 ,153 ])/255.;
-MathematicaRed        = np.array([153,61 ,113 ])/255.;
-MathematicaGreen      = np.array([61 ,153,86 ])/255.;
-MathematicaYellow     = np.array([152,140,61 ])/255.;
-MathematicaLightBlue  = np.array([159,159,204 ])/255.;
-MathematicaLightRed   = np.array([204,158,184 ])/255.;
-MathematicaLightGreen = np.array([158,204,170 ])/255.;
+MW_Light_Blue    = rgb(114,147,203)
+MW_Light_Orange  = rgb(225,151,76)
+MW_Light_Green   = rgb(132,186,91)
+MW_Light_Red     = rgb(211,94,96)
+MW_Light_Gray    = rgb(128,133,133)
+MW_Light_Purple  = rgb(144,103,167)
+MW_Light_DarkRed = rgb(171,104,87)
+MW_Light_Kaki    = rgb(204,194,16)
+
+MW_Blue    = rgb(57,106,177)
+MW_Orange  = rgb(218,124,48)
+MW_Green   = rgb(62,150,81)
+MW_Red     = rgb(204,37,41)
+MW_Gray    = rgb(83,81,84)
+MW_Purple  = rgb(107,76,154)
+MW_DarkRed = rgb(146,36,40)
+MW_Kaki    = rgb(148,139,61)
+
+MathematicaBlue       = rgb(63 ,63 ,153 );
+MathematicaRed        = rgb(153,61 ,113 );
+MathematicaGreen      = rgb(61 ,153,86 );
+MathematicaYellow     = rgb(152,140,61 );
+MathematicaLightBlue  = rgb(159,159,204 );
+MathematicaLightRed   = rgb(204,158,184 );
+MathematicaLightGreen = rgb(158,204,170 );
 # ManuDarkBlue    = np.array([0   ,0  ,0.7 ]) ;
-ManuDarkRed     = np.array([138 ,42 ,93  ])/255.;
-# ManuDarkOrange  = np.array([245 ,131,1   ])/255.;
-ManuDarkOrange  = np.array([198 ,106,1   ])/255.;
-ManuLightOrange = np.array([255.,212,96  ])/255.;
+ManuDarkRed     = rgb(138 ,42 ,93  );
+# ManuDarkOrange  = rgb(245 ,131,1   );
+ManuDarkOrange  = rgb(198 ,106,1   );
+ManuLightOrange = rgb(255.,212,96  );
 # Red        = np.array([1 ,0 ,0]);
 Blue       = np.array([0 ,0 ,1]);
@@ -205,6 +225,22 @@
 MatlabYellow = np.array([750.0e-03 ,750.0e-03,0.0e+0  ]);
 MatlabGrey   = np.array([250.0e-03 ,250.0e-03,250.0e-03]);
 
+
+# --- Pastel
+
+Pastel_Beige    = rgb(250, 240, 215) # #FAF0D7
+Pastel_Peach    = rgb(255, 217, 192) # #FFD9C0
+Pastel_LightRed = rgb(246, 214, 214) # #F6D6D6
+Pastel_Red      = rgb(223, 110, 110) # #df6e6e
+Pastel_Yellow   = rgb(246, 247, 196) # #F6F7C4
+Pastel_Bleue1   = rgb(140, 192, 222) # #8CC0DE
+Pastel_Green1   = rgb(204, 238, 188) # #CCEEBC
+Pastel_Green2   = rgb(158, 210, 190) # #9ED2BE
+Pastel_Green3   = rgb(126, 170, 146) # #7EAA92
+Pastel_Green4   = rgb( 27, 156, 133) # #1B9C85
+Pastel_Green5   = rgb( 62, 142, 126) # #3E8E7E
+
+
 # cRed=plt.cm.Reds(np.linspace(0.9,1,2))
 # cGreen=plt.cm.Greens(np.linspace(0.9,1,2))
 # cPur=plt.cm.Purples(np.linspace(0.9,1,2))
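# Behavior sketch of the rgb() helper introduced above (assuming
# pydatview.tools.colors is importable): triplets come back on a 0-1 scale
# whether they are given as 0-255 integers or as already-normalized floats.
from pydatview.tools.colors import rgb
print(rgb(215, 48, 39))    # 0-255 input  -> array([0.843..., 0.188..., 0.153...])
print(rgb(1.0, 0.0, 0.0))  # sums to <= 3 -> returned unchanged: array([1., 0., 0.])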
@@ -334,7 +370,10 @@ def rgb_to_hls(rgb):
 
     maxc = rgb.max(-1)  # maxc = max(r, g, b)
     minc = rgb.min(-1)  # minc = min(r, g, b)
-    delta = rgb.ptp(-1) # max-min
+    try:
+        delta = np.ptp(rgb, -1) # max-min
+    except:
+        delta = rgb.ptp(-1)     # max-min
 
     # --- Lightness
     hls[...,1] = (minc+maxc)/2.0
@@ -440,6 +479,18 @@ def hls_to_rgb(hls):
     return rgb
 
 
+def PRINT_COLORS(msg):
+    # See welib.tools.strings
+    HEADER    = '\033[95m'
+    RED       = '\033[91m'
+    BOLD      = '\033[1m'
+    UNDERLINE = '\033[4m'
+    ORAN      = '\033[93m'
+    GREEN     = '\033[92m'
+    ENDC      = '\033[0m'
+
+
+
 class TestColors(unittest.TestCase):
     def test_rgb_hsv(self):
         # --- Test Data

diff --git a/pydatview/tools/curve_fitting.py b/pydatview/tools/curve_fitting.py
index 1fca32e..c5e4028 100644
--- a/pydatview/tools/curve_fitting.py
+++ b/pydatview/tools/curve_fitting.py
@@ -567,7 +567,7 @@ def __repr__(self):
             s=s+' - {:15s}: {}\n'.format(k,v)
         return s
 
-    
+
     def set_default_dict(self, varnames, p0, bounds):
         D = {'varnames':varnames, 'p0':p0, 'bounds':bounds}
         if all([d is None for dname,d in D.items()]):

diff --git a/pydatview/tools/damping.py b/pydatview/tools/damping.py
index b37e5ed..4f751b0 100644
--- a/pydatview/tools/damping.py
+++ b/pydatview/tools/damping.py
@@ -204,7 +204,7 @@ def freqDampFromPeaks(x, t, threshold=None, plot=False, refPoint='mid'):
         x = x-m # we remove the mean once and for all
     if threshold is None:
         threshold = np.mean(abs(x))/3
-    
+
     dt = t[1]-t[0] # todo signal with dt not uniform
     # Is it a decay or an exploding signal

diff --git a/pydatview/tools/fatigue.py b/pydatview/tools/fatigue.py
index f5b25e0..e0a9577 100644
--- a/pydatview/tools/fatigue.py
+++ b/pydatview/tools/fatigue.py
@@ -98,6 +98,8 @@ def equivalent_load(time, signal, m=3, Teq=1, bins=100, method='rainflow_windap'
             DELi = S**m * N / neq
         Leq = DELi.sum() ** (1/m)  # See e.g. eq. (30) of [1]
 
+    except ImportError:
+        raise
     except:
         if outputMore:
             return np.nan, np.nan, np.nan, np.nan, np.nan
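# Standalone sketch of the exception pattern introduced in fatigue.py above:
# re-raising ImportError first lets a missing optional dependency (e.g. a
# rainflow-counting backend) surface loudly, while genuine numerical failures
# still degrade to NaN. All names below are hypothetical.
import numpy as np

def equivalent_load_sketch(signal):
    try:
        import some_rainflow_backend          # hypothetical optional dependency
        return some_rainflow_backend.leq(signal)
    except ImportError:
        raise                                 # dependency problem: do not hide it
    except Exception:
        return np.nan                         # computation problem: return NaN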