From 02676e650576d59b8fc3de45ca312f714edd4139 Mon Sep 17 00:00:00 2001
From: teamblubee
Date: Sat, 20 Jan 2024 23:52:12 +0800
Subject: [PATCH] fixed typo in Demo/Inference_L* notebooks

---
 Demo/Inference_LJSpeech.ipynb |  6 +++---
 Demo/Inference_LibriTTS.ipynb | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Demo/Inference_LJSpeech.ipynb b/Demo/Inference_LJSpeech.ipynb
index 3a6923e4..d5578142 100644
--- a/Demo/Inference_LJSpeech.ipynb
+++ b/Demo/Inference_LJSpeech.ipynb
@@ -68,7 +68,7 @@
     "from models import *\n",
     "from utils import *\n",
     "from text_utils import TextCleaner\n",
-    "textclenaer = TextCleaner()\n",
+    "textcleaner = TextCleaner()\n",
     "\n",
     "%matplotlib inline"
    ]
@@ -272,7 +272,7 @@
     "    ps = word_tokenize(ps[0])\n",
     "    ps = ' '.join(ps)\n",
     "\n",
-    "    tokens = textclenaer(ps)\n",
+    "    tokens = textcleaner(ps)\n",
     "    tokens.insert(0, 0)\n",
     "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
     "    \n",
@@ -465,7 +465,7 @@
     "    ps = word_tokenize(ps[0])\n",
     "    ps = ' '.join(ps)\n",
     "\n",
-    "    tokens = textclenaer(ps)\n",
+    "    tokens = textcleaner(ps)\n",
     "    tokens.insert(0, 0)\n",
     "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
     "    \n",
diff --git a/Demo/Inference_LibriTTS.ipynb b/Demo/Inference_LibriTTS.ipynb
index 4b85bf5f..e71cf5e7 100644
--- a/Demo/Inference_LibriTTS.ipynb
+++ b/Demo/Inference_LibriTTS.ipynb
@@ -70,7 +70,7 @@
     "from models import *\n",
     "from utils import *\n",
     "from text_utils import TextCleaner\n",
-    "textclenaer = TextCleaner()\n",
+    "textcleaner = TextCleaner()\n",
     "\n",
     "%matplotlib inline"
    ]
@@ -260,7 +260,7 @@
     "    ps = global_phonemizer.phonemize([text])\n",
     "    ps = word_tokenize(ps[0])\n",
     "    ps = ' '.join(ps)\n",
-    "    tokens = textclenaer(ps)\n",
+    "    tokens = textcleaner(ps)\n",
     "    tokens.insert(0, 0)\n",
     "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
     "    \n",
@@ -691,7 +691,7 @@
     "    ps = ps.replace('``', '\"')\n",
     "    ps = ps.replace(\"''\", '\"')\n",
     "\n",
-    "    tokens = textclenaer(ps)\n",
+    "    tokens = textcleaner(ps)\n",
     "    tokens.insert(0, 0)\n",
     "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
     "    \n",
@@ -815,7 +815,7 @@
     "    ps = word_tokenize(ps[0])\n",
     "    ps = ' '.join(ps)\n",
     "\n",
-    "    tokens = textclenaer(ps)\n",
+    "    tokens = textcleaner(ps)\n",
     "    tokens.insert(0, 0)\n",
     "    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)\n",
     "    \n",
@@ -824,7 +824,7 @@
     "    ps = word_tokenize(ps[0])\n",
     "    ps = ' '.join(ps)\n",
     "\n",
-    "    ref_tokens = textclenaer(ps)\n",
+    "    ref_tokens = textcleaner(ps)\n",
     "    ref_tokens.insert(0, 0)\n",
     "    ref_tokens = torch.LongTensor(ref_tokens).to(device).unsqueeze(0)\n",
     "    \n",