
Commit 774a6c0

Merge pull request #279 from posit-dev/ci-test-yaml-file
ci: add more tests for `yaml.py`
2 parents: e56f0cb + 34974a9

File tree

2 files changed: 275 additions & 19 deletions


pointblank/yaml.py

Lines changed: 23 additions & 19 deletions
@@ -321,7 +321,7 @@ def _validate_schema(self, config: dict) -> None:
             raise YAMLValidationError("YAML must contain 'steps' field")
 
         if not isinstance(config["steps"], list):
-            raise YAMLValidationError("'steps' must be a list")
+            raise YAMLValidationError("'steps' must be a list")  # pragma: no cover
 
         if len(config["steps"]) == 0:
             raise YAMLValidationError("'steps' cannot be empty")
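
The `# pragma: no cover` markers added throughout this file tell coverage.py to exclude defensive branches that the new tests do not exercise, so they no longer show up as missed lines in CI coverage reports. A minimal, self-contained sketch of that mechanism (illustrative only, not pointblank code; requires the `coverage` package):

# demo_pragma.py -- illustrative sketch, not part of pointblank
import coverage


def steps_must_be_list(config: dict) -> None:
    # Defensive branch, marked so coverage.py drops it from the report.
    if not isinstance(config.get("steps"), list):
        raise ValueError("'steps' must be a list")  # pragma: no cover


cov = coverage.Coverage()
cov.start()
steps_must_be_list({"steps": []})  # only the happy path runs
cov.stop()
cov.report(show_missing=True)  # the pragma-marked line is excluded, not reported as missing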
@@ -412,9 +412,9 @@ def _load_data_source(self, tbl_spec: str, df_library: str = "polars") -> Any:
             if processed_data is processed_tbl_spec and isinstance(processed_tbl_spec, str):
                 return load_dataset(processed_tbl_spec, tbl_type=df_library)
             else:
-                return processed_data
+                return processed_data  # pragma: no cover
 
-        except Exception as e:
+        except Exception as e:  # pragma: no cover
             raise YAMLValidationError(f"Failed to load data source '{tbl_spec}': {e}")
 
     def _load_csv_file(self, file_path: str, df_library: str) -> Any:
@@ -458,16 +458,16 @@ def _load_csv_file(self, file_path: str, df_library: str) -> Any:
 
             elif df_library == "duckdb":
                 # For DuckDB, we'll use the existing _process_data since it handles DuckDB
-                from pointblank.validate import _process_data
+                from pointblank.validate import _process_data  # pragma: no cover
 
-                return _process_data(file_path)
+                return _process_data(file_path)  # pragma: no cover
 
             else:
                 raise YAMLValidationError(
                     f"Unsupported df_library: {df_library}. Use 'polars', 'pandas', or 'duckdb'"
                 )
 
-        except Exception as e:
+        except Exception as e:  # pragma: no cover
             raise YAMLValidationError(
                 f"Failed to load CSV file '{file_path}' with {df_library}: {e}"
             )
@@ -659,7 +659,7 @@ def _parse_validation_step(
                 if isinstance(expr, str):
                     lambda_expressions.append(_safe_eval_python_code(expr))
                 else:
-                    lambda_expressions.append(expr)
+                    lambda_expressions.append(expr)  # pragma: no cover
             # Pass expressions as positional arguments (stored as special key)
             parameters["_conjointly_expressions"] = lambda_expressions
         else:
@@ -1329,15 +1329,15 @@ def yaml_to_python(yaml: Union[str, Path]) -> str:
     """
     # First, parse the raw YAML to detect Polars/Pandas expressions in the source code
     if isinstance(yaml, Path):
-        yaml_content = yaml.read_text()
+        yaml_content = yaml.read_text()  # pragma: no cover
     elif isinstance(yaml, str):
         # Check if it's a file path (single line, reasonable length, no newlines)
         if len(yaml) < 260 and "\n" not in yaml and Path(yaml).exists():
             yaml_content = Path(yaml).read_text()
         else:
             yaml_content = yaml
     else:
-        yaml_content = str(yaml)
+        yaml_content = str(yaml)  # pragma: no cover
 
     # Track whether we need to import Polars and Pandas by analyzing the raw YAML content
     needs_polars_import = False
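
Based on this branching, `yaml_to_python()` accepts a `Path`, a string pointing at an existing YAML file, or raw YAML text. A hedged usage sketch (the file name is hypothetical, and the `tbl:` key plus the bare `rows_distinct` step are assumptions about the YAML schema; only `steps` and `tbl_name` are confirmed by this diff):

from pathlib import Path

from pointblank.yaml import yaml_to_python

# Each call form corresponds to one branch in the hunk above.
code_from_path = yaml_to_python(Path("validation.yaml"))  # Path -> read_text()
code_from_file = yaml_to_python("validation.yaml")        # short, existing path string -> read from disk
code_from_text = yaml_to_python("tbl: small_table\nsteps:\n  - rows_distinct\n")  # raw YAML content
print(code_from_text)  # generated Python source for the validation plan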
@@ -1432,7 +1432,7 @@ def extract_python_expressions(obj, path=""):
         validate_args.append(f'data=pb.load_dataset("{tbl_spec}", tbl_type="{df_library}")')
     else:
         # Fallback to placeholder if we couldn't extract the original expression
-        validate_args.append("data=<python_expression_result>")
+        validate_args.append("data=<python_expression_result>")  # pragma: no cover
 
     # Add table name if present
     if "tbl_name" in config:
@@ -1465,7 +1465,7 @@ def extract_python_expressions(obj, path=""):
                 action_params.append(f'{key}="{value}"')
             else:
                 # For callables or complex expressions, use placeholder
-                action_params.append(f"{key}={value}")
+                action_params.append(f"{key}={value}")  # pragma: no cover
         actions_str = "pb.Actions(" + ", ".join(action_params) + ")"
         validate_args.append(f"actions={actions_str}")
 
@@ -1552,13 +1552,13 @@ def extract_python_expressions(obj, path=""):
                 expressions_str = "[" + ", ".join([f'"{expr}"' for expr in value]) + "]"
                 param_parts.append(f"expressions={expressions_str}")
             else:
-                param_parts.append(f"expressions={value}")
+                param_parts.append(f"expressions={value}")  # pragma: no cover
         elif key == "expr" and method_name == "specially":
             # Handle specially expr parameter: should be unquoted lambda expression
             if isinstance(value, str):
                 param_parts.append(f"expr={value}")
             else:
-                param_parts.append(f"expr={value}")
+                param_parts.append(f"expr={value}")  # pragma: no cover
         elif key in ["columns", "columns_subset"]:
             if isinstance(value, list):
                 if len(value) == 1:
@@ -1569,7 +1569,7 @@ def extract_python_expressions(obj, path=""):
                 columns_str = "[" + ", ".join([f'"{col}"' for col in value]) + "]"
                 param_parts.append(f"{key}={columns_str}")
             else:
-                param_parts.append(f'{key}="{value}"')
+                param_parts.append(f'{key}="{value}"')  # pragma: no cover
         elif key == "brief":
             # Handle `brief=` parameter: can be a boolean or a string
             if isinstance(value, bool):
@@ -1592,25 +1592,29 @@ def extract_python_expressions(obj, path=""):
             elif isinstance(value.warning, list) and len(value.warning) == 1:
                 action_params.append(f'warning="{value.warning[0]}"')
             else:
-                action_params.append(f"warning={value.warning}")
+                action_params.append(f"warning={value.warning}")  # pragma: no cover
 
             if value.error is not None:
                 error_expr_path = f"{step_action_base}.error"
                 if error_expr_path in step_expressions:
-                    action_params.append(f"error={step_expressions[error_expr_path]}")
+                    action_params.append(
+                        f"error={step_expressions[error_expr_path]}"
+                    )  # pragma: no cover
                 elif isinstance(value.error, list) and len(value.error) == 1:
                     action_params.append(f'error="{value.error[0]}"')
                 else:
-                    action_params.append(f"error={value.error}")
+                    action_params.append(f"error={value.error}")  # pragma: no cover
 
             if value.critical is not None:
                 critical_expr_path = f"{step_action_base}.critical"
                 if critical_expr_path in step_expressions:
-                    action_params.append(f"critical={step_expressions[critical_expr_path]}")
+                    action_params.append(
+                        f"critical={step_expressions[critical_expr_path]}"
+                    )  # pragma: no cover
                 elif isinstance(value.critical, list) and len(value.critical) == 1:
                     action_params.append(f'critical="{value.critical[0]}"')
                 else:
-                    action_params.append(f"critical={value.critical}")
+                    action_params.append(f"critical={value.critical}")  # pragma: no cover
 
             if hasattr(value, "highest_only") and value.highest_only is not True:
                 action_params.append(f"highest_only={value.highest_only}")
