Skip to content

Commit 64d56e6

Browse files
committed
typos fix
1 parent 1dc4719 commit 64d56e6

File tree

1 file changed

+7
-24
lines changed

1 file changed

+7
-24
lines changed

notebooks/284-openvoice/284-openvoice.ipynb

Lines changed: 7 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,7 @@
207207
"cell_type": "markdown",
208208
"metadata": {},
209209
"source": [
210-
"There are 2 models in OpenVoice: first one is responsible for speach generation `BaseSpeakerTTS` and the second one `ToneColorConverter` imposes arbitrary voice tone to the original speech. To convert to OpenVino IR format first we need to get acceptable `torch.nn.Module` object. Both ToneColorConverter, BaseSpeakerTTS instead of using `self.forward` as the main entry point use custom `infer` and `convert_voice` methods respectively, therefore need to wrap them with a custom class that is inherited from torch.nn.Module. \n",
210+
"There are 2 models in OpenVoice: first one is responsible for speech generation `BaseSpeakerTTS` and the second one `ToneColorConverter` imposes arbitrary voice tone to the original speech. To convert to OpenVINO IR format first we need to get acceptable `torch.nn.Module` object. Both ToneColorConverter, BaseSpeakerTTS instead of using `self.forward` as the main entry point use custom `infer` and `convert_voice` methods respectively, therefore we need to wrap them with a custom class that is inherited from torch.nn.Module. \n",
211211
"\n",
212212
"<!---\n",
213213
"# One more reason to make a wrapper is also that these functions use float arguments while only torch.Tensor and tuple of torch.Tensors are acceptable \n",
@@ -217,7 +217,7 @@
217217
},
218218
{
219219
"cell_type": "code",
220-
"execution_count": 8,
220+
"execution_count": null,
221221
"metadata": {},
222222
"outputs": [],
223223
"source": [
@@ -237,7 +237,7 @@
237237
" \n",
238238
"class OVOpenVoiceTTS(OVOpenVoiceBase):\n",
239239
" \"\"\"\n",
240-
" Constructor of this class accepts BaseSpeakerTTS object for speach generation and wraps it's 'infer' method with forward.\n",
240+
" Constructor of this class accepts BaseSpeakerTTS object for speech generation and wraps its 'infer' method with forward.\n",
241241
" \"\"\"\n",
242242
" def get_example_input(self):\n",
243243
" stn_tst = self.voice_model.get_text('this is original text', self.voice_model.hps, False)\n",
@@ -366,29 +366,13 @@
366366
},
367367
{
368368
"cell_type": "code",
369-
"execution_count": 11,
369+
"execution_count": null,
370370
"metadata": {},
371-
"outputs": [
372-
{
373-
"data": {
374-
"application/vnd.jupyter.widget-view+json": {
375-
"model_id": "e3dc3666c26c432bac345c670fd42c3a",
376-
"version_major": 2,
377-
"version_minor": 0
378-
},
379-
"text/plain": [
380-
"Dropdown(description='reference voice from which tone color will be copied', options=('demo_speaker0.mp3', 'de…"
381-
]
382-
},
383-
"execution_count": 11,
384-
"metadata": {},
385-
"output_type": "execute_result"
386-
}
387-
],
371+
"outputs": [],
388372
"source": [
389373
"REFERENCE_VOICES_PATH = f'{repo_dir}/resources/'\n",
390374
"reference_speakers = [\n",
391-
" *[path for path in os.listdir(REFERENCE_VOICES_PATH) if os.path.splitext(path)[-1] == '.mp3'],\n",
375+
" *[path for path in os.listdir(REFERENCE_VOICES_PATH) if os.path.splitext(path)[-1] == '.mp3'],\n",
392376
" 'record_manually',\n",
393377
" 'load_manually',\n",
394378
"]\n",
@@ -609,8 +593,7 @@
609593
"outputs": [],
610594
"source": [
611595
"if voice_source.value == 'choose_manually':\n",
612-
" upload_orig_voice = widgets.FileUpload(accept=allowed_audio_types, multiple=False, \n",
613-
" description='audo whose tone will be replaced')\n",
596+
" upload_orig_voice = widgets.FileUpload(accept=allowed_audio_types, multiple=False, description='audio whose tone will be replaced')\n",
614597
" display(upload_orig_voice)"
615598
]
616599
},

0 commit comments

Comments
 (0)