@@ -56,6 +56,7 @@ def rewoo_answer_fixture(request: pytest.FixtureRequest, rewoo_data: list[dict])
     return rewoo_data[request.param]["answer"].lower()


+@pytest.mark.skip(reason="Temporarily skip rewoo tests due to long run times, re-enable once the workflows are updated.")
 @pytest.mark.usefixtures("nvidia_api_key", "tavily_api_key")
 @pytest.mark.integration
 @pytest.mark.parametrize("use_rest_api", [False, True], ids=["nat_run", "nat_serve"])
@@ -70,6 +71,7 @@ async def test_rewoo_full_workflow(agents_dir: Path, use_rest_api: bool, rewoo_q
     await run_workflow(config_file=config_file, question=rewoo_question, expected_answer=rewoo_answer)


+@pytest.mark.skip(reason="Temporarily skip agent tests due to long run times, re-enable once the workflows are updated.")
 @pytest.mark.slow
 @pytest.mark.integration
 @pytest.mark.usefixtures("nvidia_api_key")
@@ -94,13 +96,15 @@ async def test_agent_full_workflow(agents_dir: Path, config_file: str, use_rest_

 # Code examples from `docs/source/resources/running-tests.md`
 # Intentionally not using the fixtures defined above to keep the examples clear
+@pytest.mark.skip(reason="Temporarily skip agent tests due to long run times, re-enable once the workflows are updated.")
 @pytest.mark.integration
 @pytest.mark.usefixtures("nvidia_api_key")
 async def test_react_agent_full_workflow(examples_dir: Path):
     config_file = examples_dir / "agents/react/configs/config.yml"
     await run_workflow(config_file=config_file, question="What are LLMs?", expected_answer="Large Language Model")


+@pytest.mark.skip(reason="Temporarily skip agent tests due to long run times, re-enable once the workflows are updated.")
 @pytest.mark.integration
 @pytest.mark.usefixtures("nvidia_api_key")
 async def test_react_agent_full_workflow_validate_re(examples_dir: Path):
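
This commit disables the long-running workflow tests with unconditional @pytest.mark.skip markers, so re-enabling them later will require another code change. A minimal alternative sketch, not part of this commit: gate the skip on an environment variable with pytest.mark.skipif, so the tests stay skipped by default but can be run on demand. The RUN_LONG_AGENT_TESTS variable name and the standalone example below are illustrative assumptions, not the repository's actual convention.

    # Illustrative alternative (not part of this commit): a conditional skip
    # that can be lifted without editing the test file.
    # RUN_LONG_AGENT_TESTS is a hypothetical environment variable.
    import os

    import pytest

    skip_long_running = pytest.mark.skipif(
        os.environ.get("RUN_LONG_AGENT_TESTS") != "1",
        reason="Temporarily skip agent tests due to long run times; "
               "set RUN_LONG_AGENT_TESTS=1 to run them.",
    )


    @skip_long_running
    @pytest.mark.integration
    @pytest.mark.usefixtures("nvidia_api_key")
    async def test_react_agent_full_workflow(examples_dir):
        ...

With this pattern, running the suite as RUN_LONG_AGENT_TESTS=1 pytest exercises the workflows on demand, while a default CI run still skips them with the same reason string reported in the summary.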