diff --git a/install.bat b/install.bat
index d98c06f..a4c917d 100644
--- a/install.bat
+++ b/install.bat
@@ -1,73 +1,75 @@
 @echo off
-setlocal
-title Installer
+setlocal enabledelayedexpansion
+title Applio Installer
+
+echo Welcome to the RVC CLI Installer!
+echo.
 
 set "principal=%cd%"
-set "URL_EXTRA=https://huggingface.co/IAHispano/applio/resolve/main"
 set "CONDA_ROOT_PREFIX=%UserProfile%\Miniconda3"
 set "INSTALL_ENV_DIR=%principal%\env"
 set "MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py39_23.9.0-0-Windows-x86_64.exe"
 set "CONDA_EXECUTABLE=%CONDA_ROOT_PREFIX%\Scripts\conda.exe"
 
-if not exist "%cd%\env.zip" (
-    echo Downloading the fairseq build...
-    curl -s -LJO %URL_EXTRA%/env.zip -o env.zip
-)
-
-if not exist "%cd%\env.zip" (
-    echo Download failed, trying with the powershell method
-    powershell -Command "& {Invoke-WebRequest -Uri '%URL_EXTRA%/env.zip' -OutFile 'env.zip'}"
-)
-
-if not exist "%cd%\env" (
-    echo Extracting the file...
-    powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%cd%\env.zip', '%cd%') }"
-)
-
-if not exist "%cd%\env" (
-    echo Extracting failed trying with the tar method...
-    tar -xf %cd%\env.zip
-)
-
-if exist "%cd%\env" (
-    del env.zip
-) else (
-    echo Theres a problem extracting the file please download the file and extract it manually.
-    echo https://huggingface.co/IAHispano/applio/resolve/main/env.zip
-    pause
-    exit
+echo Cleaning up unnecessary files...
+for %%F in (Makefile Dockerfile docker-compose.yaml *.sh) do (
+    if exist "%%F" del "%%F"
 )
+echo Cleanup complete.
+echo.
 
 if not exist "%CONDA_EXECUTABLE%" (
-    echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL%...
-    curl %MINICONDA_DOWNLOAD_URL% -o miniconda.exe
-
-    if not exist "%principal%\miniconda.exe" (
-        echo Download failed trying with the powershell method.
-        powershell -Command "& {Invoke-WebRequest -Uri '%MINICONDA_DOWNLOAD_URL%' -OutFile 'miniconda.exe'}"
+    echo Miniconda not found. Starting download and installation...
+    echo Downloading Miniconda...
+    powershell -Command "& {Invoke-WebRequest -Uri '%MINICONDA_DOWNLOAD_URL%' -OutFile 'miniconda.exe'}"
+    if not exist "miniconda.exe" (
+        echo Download failed. Please check your internet connection and try again.
+        goto :error
     )
-
-    echo Installing Miniconda to %CONDA_ROOT_PREFIX%...
+    echo Installing Miniconda...
     start /wait "" miniconda.exe /InstallationType=JustMe /RegisterPython=0 /S /D=%CONDA_ROOT_PREFIX%
+    if errorlevel 1 (
+        echo Miniconda installation failed.
+        goto :error
+    )
     del miniconda.exe
+    echo Miniconda installation complete.
+) else (
+    echo Miniconda already installed. Skipping installation.
 )
+echo.
 
+echo Creating Conda environment...
 call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.9
+if errorlevel 1 goto :error
+echo Conda environment created successfully.
+echo.
 
-if exist "%cd%\env\python.exe" (
-    echo Installing pip version less than 24.1...
-    "%cd%\env\python.exe" -m pip install "pip<24.1"
+if exist "%INSTALL_ENV_DIR%\python.exe" (
+    echo Installing specific pip version...
+    "%INSTALL_ENV_DIR%\python.exe" -m pip install "pip<24.1"
+    if errorlevel 1 goto :error
+    echo Pip installation complete.
+    echo.
 )
 
-echo Installing the dependencies...
-call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%"
-pip install --upgrade setuptools
-pip install -r "%principal%\requirements.txt"
+echo Installing dependencies...
+call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || goto :error
+pip install --upgrade setuptools || goto :error
+pip install -r "%principal%\requirements.txt" || goto :error
 pip uninstall torch torchvision torchaudio -y
-pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121
+pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121 || goto :error
 call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" deactivate
-echo.
+echo Dependencies installation complete.
+echo.
 
 echo RVC CLI has been installed successfully!
+echo.
+pause
+exit /b 0
+
+:error
+echo An error occurred during installation. Please check the output above for details.
 pause
-cls
\ No newline at end of file
+exit /b 1
\ No newline at end of file
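Note: the installer no longer downloads the prebuilt fairseq env.zip; the environment is now built from requirements.txt, and every step funnels failures into a single :error label via `|| goto :error` and `if errorlevel 1`. A quick post-install sanity check, as a hedged sketch (run it with the environment's own interpreter, e.g. `env\python.exe`; the expected version string follows from the pins above):

    import torch

    # The installer pins the cu121 wheels, so the build tag should reflect that.
    print(torch.__version__)          # expected: 2.1.1+cu121
    print(torch.cuda.is_available())  # True on a working CUDA (or ZLUDA) setup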
+call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || goto :error +pip install --upgrade setuptools || goto :error +pip install -r "%principal%\requirements.txt" || goto :error pip uninstall torch torchvision torchaudio -y -pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121 +pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121 || goto :error call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" deactivate -echo. +echo Dependencies installation complete. +echo echo RVC CLI has been installed successfully! +echo. +pause +exit /b 0 + +:error +echo An error occurred during installation. Please check the output above for details. pause -cls \ No newline at end of file +exit /b 1 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 2be70ba..7232ce3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,8 +21,6 @@ pedalboard # Machine learning omegaconf==2.0.5; sys_platform == 'darwin' -git+https://github.com/IAHispano/fairseq; sys_platform == 'linux' -fairseq==0.12.2; sys_platform == 'darwin' or sys_platform == 'win32' numba; sys_platform == 'linux' numba==0.57.0; sys_platform == 'darwin' or sys_platform == 'win32' torchaudio==2.1.1 @@ -32,6 +30,7 @@ torchvision==0.16.1 einops libf0 torchfcpe +transformers==4.44.2 # Visualization matplotlib==3.7.2 diff --git a/rvc/configs/config.py b/rvc/configs/config.py index 8b5ec68..51eb0e2 100644 --- a/rvc/configs/config.py +++ b/rvc/configs/config.py @@ -131,6 +131,13 @@ def device_config(self) -> tuple: def set_cuda_config(self): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) + # Zluda + if self.gpu_name.endswith("[ZLUDA]"): + print("Zluda compatibility enabled, experimental feature.") + torch.backends.cudnn.enabled = False + torch.backends.cuda.enable_flash_sdp(False) + torch.backends.cuda.enable_math_sdp(True) + torch.backends.cuda.enable_mem_efficient_sdp(False) low_end_gpus = ["16", "P40", "P10", "1060", "1070", "1080"] if ( any(gpu in self.gpu_name for gpu in low_end_gpus) diff --git a/rvc/infer/infer.py b/rvc/infer/infer.py index 352ab78..56cfd6a 100644 --- a/rvc/infer/infer.py +++ b/rvc/infer/infer.py @@ -53,6 +53,7 @@ def __init__(self): self.hubert_model = ( None # Initialize the Hubert model (for embedding extraction) ) + self.last_embedder_model = None # Last used embedder model self.tgt_sr = None # Target sampling rate for the output audio self.net_g = None # Generator network for voice conversion self.vc = None # Voice conversion pipeline instance @@ -69,8 +70,8 @@ def load_hubert(self, embedder_model: str, embedder_model_custom: str = None): embedder_model (str): Path to the pre-trained HuBERT model. embedder_model_custom (str): Path to the custom HuBERT model. 
""" - models, _, _ = load_embedding(embedder_model, embedder_model_custom) - self.hubert_model = models[0].to(self.config.device) + self.hubert_model = load_embedding(embedder_model, embedder_model_custom) + self.hubert_model.to(self.config.device) self.hubert_model = ( self.hubert_model.half() if self.config.is_half @@ -333,8 +334,9 @@ def convert_audio( if audio_max > 1: audio /= audio_max - if not self.hubert_model: + if not self.hubert_model or embedder_model != self.last_embedder_model: self.load_hubert(embedder_model, embedder_model_custom) + self.last_embedder_model = embedder_model file_index = ( index_path.strip() @@ -637,8 +639,9 @@ def convert_audio_batch( with open(pid_file_path, "w") as pid_file: pid_file.write(str(pid)) try: - if not self.hubert_model: + if not self.hubert_model or embedder_model != self.last_embedder_model: self.load_hubert(embedder_model, embedder_model_custom) + self.last_embedder_model = embedder_model self.get_vc(model_path, sid) file_index = ( index_path.strip() diff --git a/rvc/infer/pipeline.py b/rvc/infer/pipeline.py index cb4f6bd..e3eae01 100644 --- a/rvc/infer/pipeline.py +++ b/rvc/infer/pipeline.py @@ -425,14 +425,11 @@ def voice_conversion( feats = feats.view(1, -1) padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] + feats = model(feats.to(self.device))["last_hidden_state"] + feats = ( + model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats + ) if protect < 0.5 and pitch != None and pitchf != None: feats0 = feats.clone() if ( diff --git a/rvc/lib/algorithm/commons.py b/rvc/lib/algorithm/commons.py index 2524abc..c76328c 100644 --- a/rvc/lib/algorithm/commons.py +++ b/rvc/lib/algorithm/commons.py @@ -157,6 +157,24 @@ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): return acts +# Zluda, same as previous, but without jit.script +def fused_add_tanh_sigmoid_multiply_no_jit(input_a, input_b, n_channels): + """ + Fused add tanh sigmoid multiply operation. + + Args: + input_a: The first input tensor. + input_b: The second input tensor. + n_channels: The number of channels. + """ + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]: """ Convert the pad shape to a list of integers. 
diff --git a/rvc/lib/algorithm/modules.py b/rvc/lib/algorithm/modules.py
index 1038356..7a9bc54 100644
--- a/rvc/lib/algorithm/modules.py
+++ b/rvc/lib/algorithm/modules.py
@@ -1,5 +1,8 @@
 import torch
-from rvc.lib.algorithm.commons import fused_add_tanh_sigmoid_multiply
+from rvc.lib.algorithm.commons import (
+    fused_add_tanh_sigmoid_multiply_no_jit,
+    fused_add_tanh_sigmoid_multiply,
+)
 
 
 class WaveNet(torch.nn.Module):
@@ -85,6 +88,11 @@ def forward(self, x, x_mask, g=None, **kwargs):
         if g is not None:
             g = self.cond_layer(g)
 
+        # Zluda
+        is_zluda = x.device.type == "cuda" and torch.cuda.get_device_name().endswith(
+            "[ZLUDA]"
+        )
+
         for i in range(self.n_layers):
             x_in = self.in_layers[i](x)
             if g is not None:
@@ -93,7 +101,14 @@ def forward(self, x, x_mask, g=None, **kwargs):
             else:
                 g_l = torch.zeros_like(x_in)
 
-            acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+            # Preventing HIP crash by not using jit-decorated function
+            if is_zluda:
+                acts = fused_add_tanh_sigmoid_multiply_no_jit(
+                    x_in, g_l, n_channels_tensor
+                )
+            else:
+                acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+
             acts = self.drop(acts)
 
             res_skip_acts = self.res_skip_layers[i](acts)
diff --git a/rvc/lib/predictors/RMVPE.py b/rvc/lib/predictors/RMVPE.py
index f4d9175..7e9f6dd 100644
--- a/rvc/lib/predictors/RMVPE.py
+++ b/rvc/lib/predictors/RMVPE.py
@@ -408,6 +408,15 @@ def forward(self, audio, keyshift=0, speed=1, center=True):
             self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
                 audio.device
             )
+
+        # Zluda, fall back to CPU for FFTs since the HIP SDK has no cuFFT alternative
+        source_device = audio.device
+        if audio.device.type == "cuda" and torch.cuda.get_device_name().endswith(
+            "[ZLUDA]"
+        ):
+            audio = audio.to("cpu")
+            self.hann_window[keyshift_key] = self.hann_window[keyshift_key].to("cpu")
+
         fft = torch.stft(
             audio,
             n_fft=n_fft_new,
@@ -416,7 +425,8 @@ def forward(self, audio, keyshift=0, speed=1, center=True):
             window=self.hann_window[keyshift_key],
             center=center,
             return_complex=True,
-        )
+        ).to(source_device)
+
         magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
         if keyshift != 0:
             size = self.n_fft // 2 + 1
diff --git a/rvc/lib/tools/prerequisites_download.py b/rvc/lib/tools/prerequisites_download.py
index dbc9f06..e383562 100644
--- a/rvc/lib/tools/prerequisites_download.py
+++ b/rvc/lib/tools/prerequisites_download.py
@@ -44,7 +44,7 @@
     )
 ]
 models_list = [("predictors/", ["rmvpe.pt", "fcpe.pt"])]
-embedders_list = [("embedders/", ["contentvec_base.pt"])]
+embedders_list = [("embedders/contentvec/", ["pytorch_model.bin", "config.json"])]
 linux_executables_list = [("formant/", ["stftpitchshift"])]
 executables_list = [
     ("", ["ffmpeg.exe", "ffprobe.exe"]),
@@ -54,7 +54,7 @@
 folder_mapping_list = {
     "pretrained_v1/": "rvc/models/pretraineds/pretrained_v1/",
     "pretrained_v2/": "rvc/models/pretraineds/pretrained_v2/",
-    "embedders/": "rvc/models/embedders/",
+    "embedders/contentvec/": "rvc/models/embedders/contentvec/",
     "predictors/": "rvc/models/predictors/",
     "formant/": "rvc/models/formant/",
 }
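Note: ZLUDA has no cuFFT equivalent, so both STFT call sites (here and in mel_processing.py below) hop to the CPU for the transform and move the result back. The shared pattern, as a standalone sketch (the helper name is illustrative, not from this patch):

    import torch

    def stft_zluda_safe(audio: torch.Tensor, window: torch.Tensor, n_fft: int) -> torch.Tensor:
        # Run the FFT on the CPU under ZLUDA, then return to the original device.
        source_device = audio.device
        if source_device.type == "cuda" and torch.cuda.get_device_name().endswith("[ZLUDA]"):
            audio, window = audio.cpu(), window.cpu()
        return torch.stft(audio, n_fft, window=window, return_complex=True).to(source_device)

The prerequisites change mirrors the new on-disk layout: each embedder is a directory containing pytorch_model.bin and config.json instead of a single fairseq .pt checkpoint.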
-logging.getLogger("faiss.loader").setLevel(logging.WARNING) +# Remove this to see warnings about transformers models +warnings.filterwarnings("ignore") + +logging.getLogger("fairseq").setLevel(logging.ERROR) +logging.getLogger("faiss.loader").setLevel(logging.ERROR) +logging.getLogger("transformers").setLevel(logging.ERROR) +logging.getLogger("torch").setLevel(logging.ERROR) now_dir = os.getcwd() sys.path.append(now_dir) @@ -22,6 +29,12 @@ stft = base_path + ".exe" if sys.platform == "win32" else base_path +class HubertModelWithFinalProj(HubertModel): + def __init__(self, config): + super().__init__(config) + self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size) + + def load_audio(file, sample_rate): try: file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") @@ -96,36 +109,45 @@ def format_title(title): def load_embedding(embedder_model, custom_embedder=None): embedder_root = os.path.join(now_dir, "rvc", "models", "embedders") embedding_list = { - "contentvec": os.path.join(embedder_root, "contentvec_base.pt"), - "chinese-hubert-base": os.path.join(embedder_root, "chinese_hubert_base.pt"), - "japanese-hubert-base": os.path.join(embedder_root, "japanese_hubert_base.pt"), - "korean-hubert-base": os.path.join(embedder_root, "korean_hubert_base.pt"), + "contentvec": os.path.join(embedder_root, "contentvec"), + "chinese-hubert-base": os.path.join(embedder_root, "chinese_hubert_base"), + "japanese-hubert-base": os.path.join(embedder_root, "japanese_hubert_base"), + "korean-hubert-base": os.path.join(embedder_root, "korean_hubert_base"), } online_embedders = { - "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base.pt", - "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base.pt", - "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base.pt", + "contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/pytorch_model.bin", + "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/pytorch_model.bin", + "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/pytorch_model.bin", + "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/pytorch_model.bin", + } + + config_files = { + "contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/config.json", + "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/config.json", + "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/config.json", + "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/config.json", } if embedder_model == "custom": - model_path = custom_embedder - if not custom_embedder and os.path.exists(custom_embedder): + if os.path.exists(custom_embedder): + model_path = custom_embedder + else: + print(f"Custom embedder not found: {custom_embedder}, using contentvec") model_path = embedding_list["contentvec"] else: model_path = embedding_list[embedder_model] - if embedder_model in online_embedders: - if not os.path.exists(model_path): - url = 
diff --git a/rvc/train/extract/extract.py b/rvc/train/extract/extract.py
index 8f2e5ff..b9fe041 100644
--- a/rvc/train/extract/extract.py
+++ b/rvc/train/extract/extract.py
@@ -229,7 +229,7 @@ def run_pitch_extraction(exp_dir, f0_method, hop_length, num_processes, gpus):
     print(f"Pitch extraction completed in {elapsed_time:.2f} seconds.")
 
 
-def process_file_embedding(file, wav_path, out_path, model, device, version, saved_cfg):
+def process_file_embedding(file, wav_path, out_path, model, device, version):
     """Process a single audio file for embedding extraction."""
     wav_file_path = os.path.join(wav_path, file)
     out_file_path = os.path.join(out_path, file.replace("wav", "npy"))
@@ -237,21 +237,14 @@ def process_file_embedding(file, wav_path, out_path, model, device, version, sav
     if os.path.exists(out_file_path):
         return
 
-    feats = read_wave(wav_file_path, normalize=saved_cfg.task.normalize)
-    dtype = torch.float16 if device.startswith("cuda") else torch.float32
+    feats = read_wave(wav_file_path)
+    dtype = torch.float16 if config.is_half else torch.float32
     feats = feats.to(dtype).to(device)
-
-    padding_mask = torch.BoolTensor(feats.shape).fill_(False).to(dtype).to(device)
-    inputs = {
-        "source": feats,
-        "padding_mask": padding_mask,
-        "output_layer": 9 if version == "v1" else 12,
-    }
+    model = model.to(dtype).to(device)
 
     with torch.no_grad():
-        model = model.to(device).to(dtype)
-        logits = model.extract_features(**inputs)
-        feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
+        feats = model(feats)["last_hidden_state"]
+        feats = model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats
 
     feats = feats.squeeze(0).float().cpu().numpy()
     if not np.isnan(feats).any():
@@ -269,8 +262,16 @@ def run_embedding_extraction(
     print("Starting embedding extraction...")
     start_time = time.time()
 
-    models, saved_cfg, _ = load_embedding(embedder_model, embedder_model_custom)
-    model = models[0]
+    models = load_embedding(embedder_model, embedder_model_custom)
+
+    # Zluda
+    if torch.cuda.is_available() and torch.cuda.get_device_name().endswith("[ZLUDA]"):
+        print("Disabling CUDNN for Zluda")
+        torch.backends.cudnn.enabled = False
+        torch.backends.cuda.enable_flash_sdp(False)
+        torch.backends.cuda.enable_math_sdp(True)
+        torch.backends.cuda.enable_mem_efficient_sdp(False)
+
     devices = [get_device(gpu) for gpu in (gpus.split("-") if gpus != "-" else ["cpu"])]
 
     paths = sorted([file for file in os.listdir(wav_path) if file.endswith(".wav")])
@@ -281,7 +282,7 @@ def run_embedding_extraction(
     pbar = tqdm.tqdm(total=len(paths) * len(devices), desc="Embedding Extraction")
 
     tasks = [
-        (file, wav_path, out_path, model, device, version, saved_cfg)
+        (file, wav_path, out_path, models, device, version)
         for file in paths
         for device in devices
     ]
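Note: process_file_embedding drops the fairseq saved_cfg parameter: read_wave is called without `normalize=`, and half precision now follows `config.is_half` rather than the device type — which assumes a module-level `config` object is in scope in extract.py. The per-file inference reduces to this (sketch; `feats` is a (1, samples) float tensor already on `device`, `model` is the object returned by load_embedding):

    with torch.no_grad():
        hidden = model(feats)["last_hidden_state"]  # (1, frames, hidden_size)
        out = model.final_proj(hidden[0]).unsqueeze(0) if version == "v1" else hidden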
diff --git a/rvc/train/mel_processing.py b/rvc/train/mel_processing.py
index ecad648..1f5e8f3 100644
--- a/rvc/train/mel_processing.py
+++ b/rvc/train/mel_processing.py
@@ -76,6 +76,12 @@ def spectrogram_torch(y, n_fft, hop_size, win_size, center=False):
     )
     y = y.squeeze(1)
 
+    # Zluda, fall back to CPU for FFTs since the HIP SDK has no cuFFT alternative
+    source_device = y.device
+    if y.device.type == "cuda" and torch.cuda.get_device_name().endswith("[ZLUDA]"):
+        y = y.to("cpu")
+        hann_window[wnsize_dtype_device] = hann_window[wnsize_dtype_device].to("cpu")
+
     spec = torch.stft(
         y,
         n_fft,
@@ -87,7 +93,7 @@ def spectrogram_torch(y, n_fft, hop_size, win_size, center=False):
         normalized=False,
         onesided=True,
         return_complex=True,
-    )
+    ).to(source_device)
 
     spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + 1e-6)
diff --git a/rvc/train/train.py b/rvc/train/train.py
index a4e5ce9..b4c1120 100644
--- a/rvc/train/train.py
+++ b/rvc/train/train.py
@@ -319,6 +319,14 @@ def run(
     if torch.cuda.is_available():
         torch.cuda.set_device(rank)
 
+    # Zluda
+    if torch.cuda.is_available() and torch.cuda.get_device_name().endswith("[ZLUDA]"):
+        print("Disabling CUDNN for training with Zluda")
+        torch.backends.cudnn.enabled = False
+        torch.backends.cuda.enable_flash_sdp(False)
+        torch.backends.cuda.enable_math_sdp(True)
+        torch.backends.cuda.enable_mem_efficient_sdp(False)
+
     # Create datasets and dataloaders
     if pitch_guidance == True:
         train_dataset = TextAudioLoaderMultiNSFsid(config.data)
diff --git a/rvc_cli.py b/rvc_cli.py
index 90479f4..bca001c 100644
--- a/rvc_cli.py
+++ b/rvc_cli.py
@@ -86,6 +86,8 @@ def run_infer_script(
     delay: bool = False,
     *sliders: list,
 ):
+    if not sliders:
+        sliders = [0] * 25
     infer_pipeline = import_voice_converter()
     additional_params = {
         "reverb_room_size": sliders[0],
@@ -198,6 +200,8 @@ def run_batch_infer_script(
         f for f in os.listdir(input_folder) if f.endswith((".mp3", ".wav", ".flac"))
     ]
     print(f"Detected {len(audio_files)} audio files for inference.")
+    if not sliders:
+        sliders = [0] * 25
     infer_pipeline = import_voice_converter()
     additional_params = {
         "reverb_room_size": sliders[0],
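Note: both CLI entry points now default the varargs `sliders` to `[0] * 25` when none are passed, since `additional_params` indexes into it unconditionally; 25 is the count this patch uses and would need updating if sliders are added. The guard, restated as a tiny self-contained helper (hypothetical, for illustration only):

    def pad_sliders(sliders: tuple) -> list:
        # 25 neutral post-processing values when the caller passes none.
        return list(sliders) if sliders else [0] * 25

    assert len(pad_sliders(())) == 25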