Merge branch 'main' into upgrade-timeseries

ann-sherin committed Jun 13, 2023
2 parents 29fc18c + ede28fc commit 72ee46d

Showing 32 changed files with 888 additions and 83 deletions.
32 changes: 14 additions & 18 deletions disco/cli/cba_post_process.py
@@ -12,18 +12,16 @@ def add_curtailment_columns(filename, curtailment_tolerance):
     df["PVSystems__Curtailment__residential (kWh)"] = 0.0
     columns = ["substation", "feeder", "placement", "sample", "penetration_level"]

-    def calc_curtailment(pf1, control_mode):
+    def calc_energy_curtailed(pf1, control_mode):
         """Calculate curtailment."""
         if pf1 == 0:
             return 0

-        # We may need to consider using this tolerance in the future.
-        # if pf1 < curtailment_tolerance:
-        #     return 0
+        if pf1 < curtailment_tolerance:
+            return 0
         diff = pf1 - control_mode
-        #if diff < 0 and abs(diff) < curtailment_tolerance:
-        #    return 0
-        return diff / pf1
+        if diff < 0 and abs(diff) < curtailment_tolerance:
+            return 0
+        return diff

     for (substation, feeder, placement, sample, penetration_level), tdf in df.groupby(by=columns):
         for customer_type in ("commercial", "residential"):
@@ -33,7 +31,7 @@ def calc_curtailment(pf1, control_mode):
             if sim_vals.empty:
                 continue
             pf1 = tdf.query("scenario == 'pf1'")[power_col]
-            curtailment = pf1.combine(sim_vals, calc_curtailment)
+            curtailment = pf1.combine(sim_vals, calc_energy_curtailed)
             cond = lambda x: (
                 (x["substation"] == substation)
                 & (x["feeder"] == feeder)
@@ -50,15 +48,13 @@ def calc_curtailment(pf1, control_mode):

 @click.command()
 @click.argument("output_dir")
-# This is disabled because we don't know the best tolerance for these customer-type
-# aggregations. Leaving it in the code in case we need it in the future.
-# @click.option(
-#     "-d", "--curtailment-tolerance",
-#     default=0.0001,
-#     show_default=True,
-#     help="Set curtailment to 0 if the diff is less than this value.",
-# )
-def cba_post_process(output_dir, curtailment_tolerance=0.0001):
+@click.option(
+    "-d", "--curtailment-tolerance",
+    default=0.001,
+    show_default=True,
+    help="Set curtailment to 0 if the diff is less than this value.",
+)
+def cba_post_process(output_dir, curtailment_tolerance=0.001):
     """Perform post-processing of CBA tables."""
     add_curtailment_columns(output_dir, curtailment_tolerance)
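Reviewer note on the change above: pandas' Series.combine aligns the two series on their index and calls the given function on each pair of elements, which is how calc_energy_curtailed is applied row by row. A minimal sketch of that behavior, with the tolerance passed as a default argument instead of the enclosing function's closure variable:

    import pandas as pd

    def calc_energy_curtailed(pf1, control_mode, curtailment_tolerance=0.001):
        # Curtailed energy is the pf1 vs. control-mode difference; tiny values snap to 0.
        if pf1 == 0:
            return 0
        if pf1 < curtailment_tolerance:
            return 0
        diff = pf1 - control_mode
        if diff < 0 and abs(diff) < curtailment_tolerance:
            return 0
        return diff

    pf1 = pd.Series([10.0, 5.0, 0.0005])
    control_mode = pd.Series([9.0, 5.0004, 0.0004])
    # combine() aligns on index and applies the function to each element pair.
    print(pf1.combine(control_mode, calc_energy_curtailed))
    # 0    1.0   (10.0 - 9.0)
    # 1    0.0   (negative diff within tolerance)
    # 2    0.0   (pf1 below tolerance)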
25 changes: 14 additions & 11 deletions disco/cli/config_snapshot.py
@@ -197,7 +197,7 @@ def snapshot(
     for i in indices_to_pop.reverse():
         scenarios_config.pop(i)
     logger.info(
-        "Excluding %s scenarios because there are no pydss controllers.", CONTROL_MODE_SCENARIO
+        "Excluding %s scenario because there are no pydss controllers.", CONTROL_MODE_SCENARIO
     )
     config.set_pydss_config(ConfigType.SCENARIOS, scenarios_config)

@@ -241,18 +241,19 @@ def make_simulation_config(
         if report["name"] in ("Thermal Metrics", "Voltage Metrics"):
             report["store_per_element_data"] = store_per_element_data

+    names = []
+    scenarios = []
+    if not include_control_mode and not include_pf1:
+        logger.error("At least one of 'include_pf1' and 'include_control_mode' must be set.")
+        sys.exit(1)
+    if include_control_mode:
+        names.append(CONTROL_MODE_SCENARIO)
+    if include_pf1:
+        names.append(PF1_SCENARIO)
+
     if with_loadshape:
         simulation_config["project"]["simulation_type"] = SimulationType.QSTS.value
-        names = []
-        if not include_control_mode and not include_pf1:
-            logger.error("At least one of 'include_pf1' and 'include_control_mode' must be set.")
-            sys.exit(1)
-        if include_control_mode:
-            names.append(CONTROL_MODE_SCENARIO)
-        if include_pf1:
-            names.append(PF1_SCENARIO)
         if auto_select_time_points:
-            scenarios = []
            for scenario_name in names:
                 for mode in SnapshotTimePointSelectionMode:
                     if mode == SnapshotTimePointSelectionMode.NONE:
@@ -271,11 +272,13 @@ def make_simulation_config(
             scenarios = [PyDssConfiguration.make_default_pydss_scenario(x) for x in names]
     else:
         exports = {} if exports_filename is None else load_data(exports_filename)
         simulation_config["project"]["simulation_type"] = SimulationType.SNAPSHOT.value
         scenarios = [
             PyDssConfiguration.make_default_pydss_scenario(
-                "scenario",
+                x,
                 exports=exports,
             )
+            for x in names
         ]

     return simulation_config, scenarios
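Context for the auto-select branch: it iterates the SnapshotTimePointSelectionMode enum and skips the NONE member. A small sketch of that pattern with a stand-in enum (these member names and the scenario-name format are illustrative, not disco's actual values):

    import enum

    class SnapshotTimePointSelectionMode(enum.Enum):
        # Stand-in members for illustration only.
        NONE = "none"
        MAX_PV_LOAD_RATIO = "max_pv_load_ratio"
        DAYTIME_MIN_LOAD = "daytime_min_load"

    names = ["control_mode", "pf1"]
    scenarios = []
    for scenario_name in names:
        for mode in SnapshotTimePointSelectionMode:
            if mode == SnapshotTimePointSelectionMode.NONE:
                continue  # NONE is a sentinel, not a selectable strategy
            scenarios.append(f"{scenario_name}__{mode.value}")

    print(scenarios)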
8 changes: 6 additions & 2 deletions disco/cli/make_summary_tables.py
@@ -236,8 +236,12 @@ def get_voltage_metrics(results: PyDssResults, job_info: JobInfo):

 def get_snapshot_time_points_table(results: PyDssResults, job_info: JobInfo):
     """Return the snapshot time points determined by each job."""
-    snapshot_time_points_table = []
-    data = json.loads(results.read_file(f"Exports/snapshot_time_points.json"))
+    try:
+        data = json.loads(results.read_file(f"Exports/snapshot_time_points.json"))
+    except KeyError:
+        # Time points are only available if load shapes are used.
+        return []

     row = {"name": job_info.name}
     for time_point in SNAPSHOT_TIME_POINT_MAPPING:
         timestamp = data.get(SNAPSHOT_TIME_POINT_MAPPING[time_point])
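The new try/except treats a missing export as "no data" rather than an error. A minimal sketch of the pattern, with a hypothetical reader standing in for PyDssResults.read_file (which, per the diff, raises KeyError when the archive has no such entry):

    import json

    def get_snapshot_time_points(read_file):
        # read_file stands in for PyDssResults.read_file here.
        try:
            data = json.loads(read_file("Exports/snapshot_time_points.json"))
        except KeyError:
            # Time points are only produced for load-shape runs.
            return []
        return [{"time_points": data}]

    store = {}  # an archive with no exports
    print(get_snapshot_time_points(store.__getitem__))  # []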
2 changes: 2 additions & 0 deletions disco/pipelines/base.py
@@ -266,13 +266,15 @@ def create_postprocess_auto_config_text_file(self):
         text_file = self.get_postprocess_auto_config_text_file()
         with open(text_file, "w") as f:
             f.write(auto_config_command)
+            f.write("\n")
         return text_file

     def create_postprocess_command_text_file(self):
         text_file = "pipeline-postprocess-command.txt"
         command = self.make_postprocess_command()
         with open(text_file, "w") as f:
             f.write(command)
+            f.write("\n")
         return text_file

     def make_prescreen_stage(self):
42 changes: 22 additions & 20 deletions disco/pipelines/creator.py
@@ -36,16 +36,22 @@ def make_model_transform_command(self):
         return command

     def make_disco_config_command(self, section):
-        if self.template.preconfigured:
-            model_inputs = self.template.inputs
+        # self.template reads the file fresh every time.
+        # TODO: change the rest of this file to not constantly re-read it.
+        template = self.template
+        if template.preconfigured:
+            model_inputs = template.inputs
         else:
-            model_inputs = self.template.get_model_transform_output()
-        options = self.template.get_config_options(section)
+            model_inputs = template.get_model_transform_output()
+        options = template.get_config_options(section)
+        # The code that builds the option string excludes False values but we need it here.
+        if not template.get_config_params(TemplateSection.SIMULATION)["with_loadshape"]:
+            options += " --no-with-loadshape"
         reports_filename = "generated_snapshot_reports.toml"
-        dump_data(self.template.reports, reports_filename)
+        dump_data(template.reports, reports_filename)
         exports_filename = get_default_exports_file(
             SimulationType.SNAPSHOT,
-            AnalysisType(self.template.analysis_type),
+            AnalysisType(template.analysis_type),
         )
         command = (
             f"disco config snapshot {model_inputs} "
@@ -76,21 +82,17 @@ def make_postprocess_command(self):
         pf1 = config_params["pf1"]
         base_cmd = f"disco-internal compute-hosting-capacity {inputs}"
         plot_cmd = f"disco-internal plot {inputs}"
-        if with_loadshape:
-            scenarios = [CONTROL_MODE_SCENARIO]
-            if pf1:
-                scenarios.append(PF1_SCENARIO)
-            if auto_select_time_points:
-                for scenario in scenarios:
-                    for mode in SnapshotTimePointSelectionMode:
-                        if mode != SnapshotTimePointSelectionMode.NONE:
-                            commands.append(f"{base_cmd} --scenario={scenario} --time-point={mode.value}")
-            else:
-                for scenario in scenarios:
-                    commands.append(f"{base_cmd} --scenario={scenario}")
+        scenarios = [CONTROL_MODE_SCENARIO]
+        if pf1:
+            scenarios.append(PF1_SCENARIO)
+        if with_loadshape and auto_select_time_points:
+            for scenario in scenarios:
+                for mode in SnapshotTimePointSelectionMode:
+                    if mode != SnapshotTimePointSelectionMode.NONE:
+                        commands.append(f"{base_cmd} --scenario={scenario} --time-point={mode.value}")
         else:
-            commands.append(f"{base_cmd} --scenario=scenario")
-            scenarios = ["scenario"]
+            for scenario in scenarios:
+                commands.append(f"{base_cmd} --scenario={scenario}")

         # Plot
         for scenario in scenarios:
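Context for the new "--no-with-loadshape" handling: click renders a boolean option as a flag pair, so a generic option-string builder that skips falsey values silently drops the False case, and the pipeline must append the negative flag explicitly. A minimal sketch of such a flag (assuming click; this command is illustrative, not the disco CLI):

    import click

    @click.command()
    @click.option("--with-loadshape/--no-with-loadshape", default=True, show_default=True)
    def snapshot(with_loadshape):
        click.echo(f"with_loadshape={with_loadshape}")

    if __name__ == "__main__":
        snapshot()  # pass --no-with-loadshape to override the True default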
13 changes: 13 additions & 0 deletions disco/pydss/config/pv_controllers.toml
@@ -50,3 +50,16 @@ percent_p_cutout = 10
 Efficiency = 100
 Priority = "Var"
 DampCoef = 0.8
+
+[volt_watt]
+Control1 = "VW"
+Control2 = "None"
+Control3 = "None"
+DampCoef = 0.8
+PFlim = 0.9
+PminVW = 20
+Priority = "Watt"
+QlimPU = 0.44
+VWtype = "Available Power"
+uMaxC = 1.1
+uMinC = 1.06
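To sanity-check the new [volt_watt] section, a small sketch that loads the file and reads a few keys (assuming Python 3.11's tomllib; older interpreters can use the third-party "toml" package):

    import tomllib  # Python 3.11+

    with open("disco/pydss/config/pv_controllers.toml", "rb") as f:
        controllers = tomllib.load(f)

    vw = controllers["volt_watt"]
    print(vw["Control1"], vw["VWtype"])   # VW, Available Power
    print(vw["uMinC"], vw["uMaxC"])       # 1.06, 1.1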
2 changes: 1 addition & 1 deletion disco/sources/source_tree_1/source_tree_1_model.py
@@ -824,7 +824,7 @@ def get_pydss_controller_and_profiles(pv_configs):
                 pv_config["pydss_controller"]
             )
             pydss_controllers.add(ctrl)
-            pv_profiles[pv_config["name"]] = pv_config["pv_profile"]
+            pv_profiles[pv_config["name"]] = pv_config.get("pv_profile")

     if len(pydss_controllers) > 1:
         raise Exception(
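The one-line change above swaps indexing for .get so PV configs without a "pv_profile" key (e.g., models without load shapes) map to None instead of raising. Illustration:

    pv_config = {"name": "pv1"}  # a config without a "pv_profile" key

    pv_profiles = {}
    # Old: pv_config["pv_profile"] raised KeyError for such configs.
    pv_profiles[pv_config["name"]] = pv_config.get("pv_profile")
    print(pv_profiles)  # {'pv1': None}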
2 changes: 1 addition & 1 deletion disco/storage/ingesters.py
@@ -125,7 +125,7 @@ def ingest(self, objects):
         columns = self.data_class.__table__.columns.keys()
         data = [tuple([item[column] for column in columns]) for item in objects]
         self._perform_ingestion(columns=columns, data=data)
-        if "id" in objects[0]:
+        if objects and "id" in objects[0]:
             indexes = {self._generate_identifier(item): item["id"] for item in objects}
         else:
             indexes = []
21 changes: 15 additions & 6 deletions disco/storage/parsers.py
@@ -174,9 +174,15 @@ def parse(self, config_file, snapshot_time_points_table):
         return self._parse_scenarios_from_config_file(config_file)

     def _parse_scenarios_from_snapshot_time_points(self, snapshot_time_points_table):
-        mapping = {job["name"]: job for job in self.jobs}
-        df = pd.read_csv(snapshot_time_points_table)
-        time_points = {item["name"]: item for item in df.to_dict(orient="records")}
+        df = None
+        try:
+            df = pd.read_csv(snapshot_time_points_table)
+        except pd.errors.EmptyDataError:
+            pass
+        if df is None or df.empty:
+            time_points = {}
+        else:
+            time_points = {item["name"]: item for item in df.to_dict(orient="records")}

         scenarios = []
         for job in self.jobs:
@@ -193,7 +199,7 @@ def _parse_scenarios_from_snapshot_time_points(self, snapshot_time_points_table)
                         "job_id": job["id"],
                         "simulation_type": "snapshot",
                         "name": _scenario_name,
-                        "start_time": tp[mode.value],
+                        "start_time": tp.get(mode.value),
                         "end_time": None
                     })
         return scenarios
@@ -421,8 +427,11 @@ def __init__(self, report, jobs):
     def parse(self, output):
         """Parse time points data for snapshot simulation"""
         logger.info("Parsing data - 'snapshot_time_points'...")
-        df = pd.read_csv(output.snapshot_time_points_table)
-        data = df.to_dict(orient="records")
+        try:
+            df = pd.read_csv(output.snapshot_time_points_table)
+            data = df.to_dict(orient="records")
+        except pd.errors.EmptyDataError:
+            data = {}
         data = self._set_record_index(data)
         return data
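For reference: pandas raises pd.errors.EmptyDataError when read_csv gets a file with no columns at all, which is what the snapshot time-points table looks like for runs without load shapes. A quick demonstration of the behavior the new except clauses handle:

    import io
    import pandas as pd

    empty_table = io.StringIO("")  # a zero-byte snapshot time-points table
    try:
        df = pd.read_csv(empty_table)
        data = df.to_dict(orient="records")
    except pd.errors.EmptyDataError:
        data = {}  # same fallback as the diff

    print(data)  # {}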
2 changes: 1 addition & 1 deletion docs/source/analysis-workflows/upgrade-cost-analysis.rst
@@ -128,7 +128,7 @@ Single Execution Mode
 ~~~~~~~~~~~~~~~~~~~~~
 1. Configure the simulation parameters and in an input JSON file called ``upgrades.json``.
    Refer to this
-   `file <https://github.com/NREL/disco/blob/main/tests/data/test_upgrade_cost_analysis_generic.json>`_
+   `file <https://github.com/NREL/disco/blob/main/tests/data/upgrade_cost_analysis_generic.json>`_
    as an example. The JSON schemas are defined in :ref:`upgrade_cost_analysis_schemas`.

 Each job represents one OpenDSS network and one upgrade simulation.
4 changes: 2 additions & 2 deletions docs/source/installation.rst
@@ -13,7 +13,7 @@ Conda Installation

 .. code-block:: bash

-    $ conda create -n disco python=3.9
+    $ conda create -n disco python=3.10
     $ conda activate disco

 Optional: Install extra packages.
@@ -59,7 +59,7 @@ Follow these instructions if you will be developing DISCO code and running tests

 .. code-block:: bash

-    $ git clone git@github.com:NREL/disco.git
+    $ git clone https://github.com/NREL/disco.git
     $ cd disco
     $ pip install -e '.[dev]'
7 changes: 7 additions & 0 deletions tests/conftest.py
@@ -41,3 +41,10 @@ def smart_ds_substations():
     return os.path.join(
         os.path.dirname(__file__), "data", "smart-ds", "substations"
     )
+
+
+@pytest.fixture
+def smart_ds_substations_no_loadshapes():
+    return os.path.join(
+        os.path.dirname(__file__), "data", "smart-ds", "substations-no-load-shapes"
+    )
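A hypothetical test using the new fixture (not part of the commit), to show how pytest injects it by parameter name:

    import os

    def test_substations_no_loadshapes_path(smart_ds_substations_no_loadshapes):
        # pytest matches the argument name to the fixture defined in conftest.py.
        assert os.path.exists(smart_ds_substations_no_loadshapes)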
1 change: 1 addition & 0 deletions tests/data/smart-ds/substations-no-load-shapes/format.toml
@@ -0,0 +1 @@
+type = "SourceTree1Model"
@@ -0,0 +1,75 @@
p1udm16432 636391.0 3373903.0
p1udm31921 636334.925 3373890.525
p1udm40050 636331.415 3373884.675
p1udt2569 636296.634325 3373735.4514315
p1udt2569lv 636292.0541 3373734.40681
p1udt4186 636157.96071 3373564.57017
p1udt4186lv 636154.2346825 3373567.4315155
p1udt4688 636379.345774 3373900.685675
p1udt4688lv 636378.654226 3373905.314325
p1udt5257 636261.2346825 3373483.4315155
p1udt5257lv 636256.80464 3373484.998295
p1udt8738 636317.345774 3373849.685675
p1udt8738lv 636316.654226 3373854.314325
p1udt9514 636348.345774 3373894.685675
p1udt9514lv 636347.654226 3373899.314325
p1udt9880 636640.654226 3373864.314325
p1udt9880lv 636641.345774 3373859.685675
p1udt11629 636474.345774 3373888.685675
p1udt11629lv 636473.654226 3373893.314325
p1udt12324 636323.345774 3373867.685675
p1udt12324lv 636322.654226 3373872.314325
p1udt14166 636457.345774 3373891.685675
p1udt14166lv 636456.654226 3373896.314325
p1udt17537 636422.345774 3373896.685675
p1udt17537lv 636421.654226 3373901.314325
p1udt18129 636286.654226 3373660.314325
p1udt18129lv 636287.345774 3373655.685675
p1udt19673 636395.80464 3373514.998295
p1udt19673lv 636400.2346825 3373513.4315155
p1udt21477 636301.328625 3373735.347191
p1udt21477lv 636305.98295 3373734.7945675
p1udt21744 636281.654226 3373621.314325
p1udt21744lv 636282.345774 3373616.685675
p1udt24034 636424.345774 3373658.685675
p1udt24034lv 636423.654226 3373663.314325
p1udt24044 636266.99145965 3373522.01314495
p1udt24044lv 636269.059915 3373526.743765
p1udt24356 636358.80464 3373436.998295
p1udt24356lv 636363.2346825 3373435.4315155
p1udt24768 636410.96071 3373570.57017
p1udt24768lv 636407.2346825 3373573.4315155
p1ulv2723 636242.0 3373751.0
p1ulv4595 636149.80464 3373568.998295
p1ulv5237 636380.0 3373963.0
p1ulv5951 636212.0 3373493.0
p1ulv10887 636283.0 3373848.0
p1ulv12152 636320.0 3373952.0
p1ulv12765 636666.0 3373909.0
p1ulv15856 636480.345774 3373937.685675
p1ulv17169 636219.0 3373922.0
p1ulv19767 636660.0 3373901.0
p1ulv21045 636453.0 3373884.0
p1ulv24237 636327.0 3373957.0
p1ulv29261 636235.0 3373951.0
p1ulv29904 636422.0 3373929.0
p1ulv31790 636196.0 3373672.0
p1ulv36321 636278.0 3373895.0
p1ulv37000 636403.96071 3373510.57017
p1ulv44471 636343.0 3373731.0
p1ulv45758 636229.0 3373623.0
p1ulv53232 636363.0 3373858.0
p1ulv57547 636447.0 3373635.0
p1ulv57596 636181.0 3373534.0
p1ulv59420 636366.96071 3373432.57017
p1ulv61813 636402.80464 3373574.998295
p1ulv61814 636397.0 3373967.0
p1ulv62436 636212.0 3373756.0
p1ulv63094 636402.0 3373688.0
p1ulv64357 636256.0 3373924.0
p1ulv64358 636371.0 3373985.0
p1udt24044-p1udt4186x 636264.97616 3373517.25721
p1udt19673-p1udt24044x 636262.24122 3373524.031835
p1udt21744-p1udt24044x 636271.73122 3373519.95406
p1udt24356-p1udt5257x 636264.96071 3373480.57017
p1udt5257-p1uhs21_1247x 636287.329925 3373377.642027