From 28eeaa04e6c87640bedcdfc0e92c75ddfed85895 Mon Sep 17 00:00:00 2001
From: pyranota
Date: Thu, 23 May 2024 19:02:28 +0200
Subject: [PATCH] Migrate openai from 0.28.1 to 1.30.2

---
 bot/bot.py          | 14 +++----
 bot/openai_utils.py | 94 +++++++++++++++++++--------------------
 requirements.txt    |  2 +-
 3 files changed, 48 insertions(+), 62 deletions(-)

diff --git a/bot/bot.py b/bot/bot.py
index 033639774..8f7d77260 100644
--- a/bot/bot.py
+++ b/bot/bot.py
@@ -308,7 +308,7 @@ async def fake_gen():
                 , "bot": answer, "date": datetime.now()}
         else:
             new_dialog_message = {"user": [{"type": "text", "text": message}], "bot": answer, "date": datetime.now()}
-
+
         db.set_dialog_messages(
             user_id,
             db.get_dialog_messages(user_id, dialog_id=None) + [new_dialog_message],
@@ -406,12 +406,12 @@ async def fake_gen():
         gen = fake_gen()

         prev_answer = ""
-
+
         async for gen_item in gen:
             status, answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed = gen_item

             answer = answer[:4096]  # telegram message limit
-
+
             # update only when 100 new symbols are ready
             if abs(len(answer) - len(prev_answer)) < 100 and status != "finished":
                 continue
@@ -425,9 +425,9 @@ async def fake_gen():
                 await context.bot.edit_message_text(answer, chat_id=placeholder_message.chat_id, message_id=placeholder_message.message_id)

             await asyncio.sleep(0.01)  # wait a bit to avoid flooding
-
+
             prev_answer = answer
-
+
         # update user data
         new_dialog_message = {"user": [{"type": "text", "text": _message}], "bot": answer, "date": datetime.now()}
@@ -511,7 +511,7 @@ async def voice_message_handle(update: Update, context: CallbackContext):
     voice = update.message.voice
     voice_file = await context.bot.get_file(voice.file_id)
-
+
     # store file in memory, not on disk
     buf = io.BytesIO()
     await voice_file.download_to_memory(buf)
@@ -541,7 +541,7 @@ async def generate_image_handle(update: Update, context: CallbackContext, messag
     try:
         image_urls = await openai_utils.generate_images(message, n_images=config.return_n_generated_images, size=config.image_size)
-    except openai.error.InvalidRequestError as e:
+    except openai.BadRequestError as e:
         if str(e).startswith("Your request was rejected as a result of our safety system"):
             text = "🥲 Your request doesn't comply with OpenAI's usage policies.\nWhat did you write there, huh?"
             await update.message.reply_text(text, parse_mode=ParseMode.HTML)

diff --git a/bot/openai_utils.py b/bot/openai_utils.py
index 005f80139..b30646b36 100644
--- a/bot/openai_utils.py
+++ b/bot/openai_utils.py
@@ -5,12 +5,11 @@
 import tiktoken

 import openai
+from openai import AsyncOpenAI
+
+aclient = AsyncOpenAI(api_key=config.openai_api_key, base_url=config.openai_api_base, timeout=60.0)

-# setup openai
-openai.api_key = config.openai_api_key
-if config.openai_api_base is not None:
-    openai.api_base = config.openai_api_base

 logger = logging.getLogger(__name__)

@@ -19,8 +18,7 @@
     "max_tokens": 1000,
     "top_p": 1,
     "frequency_penalty": 0,
-    "presence_penalty": 0,
-    "request_timeout": 60.0,
+    "presence_penalty": 0
 }

@@ -40,26 +38,22 @@ async def send_message(self, message, dialog_messages=[], chat_mode="assistant")
             if self.model in {"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview", "gpt-4-vision-preview"}:
                 messages = self._generate_prompt_messages(message, dialog_messages, chat_mode)
-                r = await openai.ChatCompletion.acreate(
-                    model=self.model,
-                    messages=messages,
-                    **OPENAI_COMPLETION_OPTIONS
-                )
+                r = await aclient.chat.completions.create(model=self.model,
+                                                          messages=messages,
+                                                          **OPENAI_COMPLETION_OPTIONS)
-                answer = r.choices[0].message["content"]
+                answer = r.choices[0].message.content
             elif self.model == "text-davinci-003":
                 prompt = self._generate_prompt(message, dialog_messages, chat_mode)
-                r = await openai.Completion.acreate(
-                    engine=self.model,
-                    prompt=prompt,
-                    **OPENAI_COMPLETION_OPTIONS
-                )
+                r = await aclient.completions.create(model=self.model,
+                                                     prompt=prompt,
+                                                     **OPENAI_COMPLETION_OPTIONS)
                 answer = r.choices[0].text
             else:
                 raise ValueError(f"Unknown model: {self.model}")

             answer = self._postprocess_answer(answer)
             n_input_tokens, n_output_tokens = r.usage.prompt_tokens, r.usage.completion_tokens
-        except openai.error.InvalidRequestError as e:  # too many tokens
+        except openai.BadRequestError as e:  # too many tokens
             if len(dialog_messages) == 0:
                 raise ValueError("Dialog messages is reduced to zero, but still has too many tokens to make completion") from e
@@ -81,33 +75,29 @@ async def send_message_stream(self, message, dialog_messages=[], chat_mode="assi
             if self.model in {"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview"}:
                 messages = self._generate_prompt_messages(message, dialog_messages, chat_mode)
-                r_gen = await openai.ChatCompletion.acreate(
-                    model=self.model,
-                    messages=messages,
-                    stream=True,
-                    **OPENAI_COMPLETION_OPTIONS
-                )
+                r_gen = await aclient.chat.completions.create(model=self.model,
+                                                              messages=messages,
+                                                              stream=True,
+                                                              **OPENAI_COMPLETION_OPTIONS)

                 answer = ""
                 async for r_item in r_gen:
                     delta = r_item.choices[0].delta
-                    if "content" in delta:
+                    if delta.content is not None:
                         answer += delta.content
                         n_input_tokens, n_output_tokens = self._count_tokens_from_messages(messages, answer, model=self.model)
                         n_first_dialog_messages_removed = 0
                         yield "not_finished", answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed
-
+
             elif self.model == "text-davinci-003":
                 prompt = self._generate_prompt(message, dialog_messages, chat_mode)
-                r_gen = await openai.Completion.acreate(
-                    engine=self.model,
-                    prompt=prompt,
-                    stream=True,
-                    **OPENAI_COMPLETION_OPTIONS
-                )
+                r_gen = await aclient.completions.create(model=self.model,
+                                                         prompt=prompt,
+                                                         stream=True,
+                                                         **OPENAI_COMPLETION_OPTIONS)

                 answer = ""
                 async for r_item in r_gen:
@@ -118,7 +108,7 @@ async def send_message_stream(self, message, dialog_messages=[], chat_mode="assi

             answer = self._postprocess_answer(answer)

-        except openai.error.InvalidRequestError as e:  # too many tokens
+        except openai.BadRequestError as e:  # too many tokens
             if len(dialog_messages) == 0:
                 raise e
@@ -142,11 +132,9 @@ async def send_vision_message(
                 messages = self._generate_prompt_messages(
                     message, dialog_messages, chat_mode, image_buffer
                 )
-                r = await openai.ChatCompletion.acreate(
-                    model=self.model,
-                    messages=messages,
-                    **OPENAI_COMPLETION_OPTIONS
-                )
+                r = await aclient.chat.completions.create(model=self.model,
+                                                          messages=messages,
+                                                          **OPENAI_COMPLETION_OPTIONS)
                 answer = r.choices[0].message.content
             else:
                 raise ValueError(f"Unsupported model: {self.model}")
@@ -156,7 +144,7 @@ async def send_vision_message(
                 r.usage.prompt_tokens,
                 r.usage.completion_tokens,
             )
-        except openai.error.InvalidRequestError as e:  # too many tokens
+        except openai.BadRequestError as e:  # too many tokens
             if len(dialog_messages) == 0:
                 raise ValueError(
                     "Dialog messages is reduced to zero, but still has too many tokens to make completion"
@@ -190,18 +178,16 @@ async def send_vision_message_stream(
                 messages = self._generate_prompt_messages(
                     message, dialog_messages, chat_mode, image_buffer
                 )
-
-                r_gen = await openai.ChatCompletion.acreate(
-                    model=self.model,
-                    messages=messages,
-                    stream=True,
-                    **OPENAI_COMPLETION_OPTIONS,
-                )
+
+                r_gen = await aclient.chat.completions.create(model=self.model,
+                                                              messages=messages,
+                                                              stream=True,
+                                                              **OPENAI_COMPLETION_OPTIONS)

                 answer = ""
                 async for r_item in r_gen:
                     delta = r_item.choices[0].delta
-                    if "content" in delta:
+                    if delta.content is not None:
                         answer += delta.content
                         (
                             n_input_tokens,
@@ -219,7 +205,7 @@ async def send_vision_message_stream(

             answer = self._postprocess_answer(answer)

-        except openai.error.InvalidRequestError as e:  # too many tokens
+        except openai.BadRequestError as e:  # too many tokens
             if len(dialog_messages) == 0:
                 raise e
             # forget first message in dialog_messages
@@ -254,11 +240,11 @@ def _generate_prompt_messages(self, message, dialog_messages, chat_mode, image_b
         prompt = config.chat_modes[chat_mode]["prompt_start"]

         messages = [{"role": "system", "content": prompt}]
-
+
         for dialog_message in dialog_messages:
             messages.append({"role": "user", "content": dialog_message["user"]})
             messages.append({"role": "assistant", "content": dialog_message["bot"]})
-
+
         if image_buffer is not None:
             messages.append(
                 {
@@ -274,7 +260,7 @@ def _generate_prompt_messages(self, message, dialog_messages, chat_mode, image_b
                         }
                     ]
                 }
-
+
             )
         else:
             messages.append({"role": "user", "content": message})
@@ -342,16 +328,16 @@ def _count_tokens_from_prompt(self, prompt, answer, model="text-davinci-003"):


 async def transcribe_audio(audio_file) -> str:
-    r = await openai.Audio.atranscribe("whisper-1", audio_file)
-    return r["text"] or ""
+    r = await aclient.audio.transcriptions.create(model="whisper-1", file=audio_file)
+    return r.text or ""


 async def generate_images(prompt, n_images=4, size="512x512"):
-    r = await openai.Image.acreate(prompt=prompt, n=n_images, size=size)
+    r = await aclient.images.generate(prompt=prompt, n=n_images, size=size)
     image_urls = [item.url for item in r.data]
     return image_urls


 async def is_content_acceptable(prompt):
-    r = await openai.Moderation.acreate(input=prompt)
-    return not all(r.results[0].categories.values())
\ No newline at end of file
+    r = await aclient.moderations.create(input=prompt)
+    return not all(r.results[0].categories.model_dump().values())

diff --git a/requirements.txt b/requirements.txt
index e9b435a57..20b42c002 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 python-telegram-bot[rate-limiter]==20.1
-openai==0.28.1
+openai==1.30.2
 tiktoken>=0.3.0
 PyYAML==6.0
 pymongo==4.3.3
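-- 
Notes below the signature delimiter, so `git am` ignores them: a minimal
sketch of the openai 1.x async pattern this patch adopts. The API key and
model strings are placeholders, not values from this repo; in the bot they
come from the config module.

    import asyncio
    from openai import AsyncOpenAI, BadRequestError

    # placeholder key; the bot passes config.openai_api_key instead
    aclient = AsyncOpenAI(api_key="sk-placeholder", timeout=60.0)

    async def main():
        try:
            r = await aclient.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Hello!"}],
            )
            # 1.x returns pydantic objects: attribute access, not r["choices"]
            print(r.choices[0].message.content)
        except BadRequestError:
            # 1.x name for what 0.28 raised as openai.error.InvalidRequestError,
            # e.g. when the prompt exceeds the model's context window
            raise

    asyncio.run(main())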
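With stream=True, the 1.x client returns an async iterator whose chunk deltas
can carry content=None (role-only first chunk, empty final chunk), which is
why the patch guards with `is not None` instead of the old `"content" in
delta` dict check. A sketch, reusing the placeholder client above:

    async def stream_demo() -> str:
        r_gen = await aclient.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Count to three."}],
            stream=True,
        )
        answer = ""
        async for chunk in r_gen:
            delta = chunk.choices[0].delta
            if delta.content is not None:  # skip role-only / empty chunks
                answer += delta.content
        return answer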