@@ -59,7 +59,7 @@ def test_init_missing_prompt_variable(self, monkeypatch):
                 prompt_variable="test2"
             )

-    def test_to_dict(self, monkeypatch):
+    def test_to_dict_default_params(self, monkeypatch):
         monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
         extractor = LLMMetadataExtractor(
             prompt="some prompt that was used with the LLM {{test}}",
@@ -68,15 +68,18 @@ def test_to_dict(self, monkeypatch):
             prompt_variable="test",
             generator_api_params={'model': 'gpt-4o-mini', 'generation_kwargs': {"temperature": 0.5}},
             raise_on_failure=True)
+
         extractor_dict = extractor.to_dict()
+
         assert extractor_dict == {
             'type': 'haystack_experimental.components.extractors.llm_metadata_extractor.LLMMetadataExtractor',
             'init_parameters': {
                 'prompt': 'some prompt that was used with the LLM {{test}}',
                 'expected_keys': ['key1', 'key2'],
                 'raise_on_failure': True,
-                'input_text': 'test',
+                'prompt_variable': 'test',
                 'generator_api': 'openai',
+                'page_range': None,
                 'generator_api_params': {
                     'api_base_url': None,
                     'api_key': {'env_vars': ['OPENAI_API_KEY'], 'strict': True, 'type': 'env_var'},