Commit c2dc4da

Fix typos (#4021)
* Fix
* Fix
* Fix
1 parent de38513 commit c2dc4da

55 files changed: 136 additions & 136 deletions

audio/paddleaudio/datasets/esc50.py

Lines changed: 2 additions & 2 deletions

@@ -35,7 +35,7 @@ class ESC50(AudioClassificationDataset):
     http://dx.doi.org/10.1145/2733373.2806390
     """

-    archieves = [
+    archives = [
         {
             'url':
             'https://paddleaudio.bj.bcebos.com/datasets/ESC-50-master.zip',
@@ -133,7 +133,7 @@ def _get_meta_info(self) -> List[collections.namedtuple]:
     def _get_data(self, mode: str, split: int) -> Tuple[List[str], List[int]]:
         if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)) or \
                 not os.path.isfile(os.path.join(DATA_HOME, self.meta)):
-            download_and_decompress(self.archieves, DATA_HOME)
+            download_and_decompress(self.archives, DATA_HOME)

         meta_info = self._get_meta_info()

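For context on the rename, which repeats across the dataset classes below: each entry in the `archives` list pairs a download URL with its MD5 checksum, and `download_and_decompress(self.archives, DATA_HOME)` fetches and unpacks every entry into `DATA_HOME`. The sketch below only illustrates that consumption pattern; it is a hypothetical stand-in, not paddleaudio's actual helper.

import hashlib
import os
import urllib.request
import zipfile

def download_and_decompress(archives, target_dir):
    """Hypothetical stand-in showing how the {'url', 'md5'} entries are consumed."""
    os.makedirs(target_dir, exist_ok=True)
    for archive in archives:
        filepath = os.path.join(target_dir, os.path.basename(archive['url']))
        if not os.path.isfile(filepath):                 # fetch once, then reuse
            urllib.request.urlretrieve(archive['url'], filepath)
        with open(filepath, 'rb') as f:                  # verify the advertised checksum
            assert hashlib.md5(f.read()).hexdigest() == archive['md5']
        if zipfile.is_zipfile(filepath):                 # unpack zip archives in place
            with zipfile.ZipFile(filepath) as zf:
                zf.extractall(target_dir)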
audio/paddleaudio/datasets/gtzan.py

Lines changed: 2 additions & 2 deletions

@@ -35,7 +35,7 @@ class GTZAN(AudioClassificationDataset):
     https://ieeexplore.ieee.org/document/1021072/
     """

-    archieves = [
+    archives = [
         {
             'url': 'http://opihi.cs.uvic.ca/sound/genres.tar.gz',
             'md5': '5b3d6dddb579ab49814ab86dba69e7c7',
@@ -85,7 +85,7 @@ def _get_data(self, mode, seed, n_folds,
                   split) -> Tuple[List[str], List[int]]:
         if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)) or \
                 not os.path.isfile(os.path.join(DATA_HOME, self.meta)):
-            download_and_decompress(self.archieves, DATA_HOME)
+            download_and_decompress(self.archives, DATA_HOME)

         meta_info = self._get_meta_info()
         random.seed(seed)  # shuffle samples to split data

audio/paddleaudio/datasets/rirs_noises.py

Lines changed: 2 additions & 2 deletions

@@ -30,7 +30,7 @@


 class OpenRIRNoise(Dataset):
-    archieves = [
+    archives = [
         {
             'url': 'http://www.openslr.org/resources/28/rirs_noises.zip',
             'md5': 'e6f48e257286e05de56413b4779d8ffb',
@@ -76,7 +76,7 @@ def _get_data(self):
         print(f"rirs noises base path: {self.base_path}")
         if not os.path.isdir(self.base_path):
             download_and_decompress(
-                self.archieves, self.base_path, decompress=True)
+                self.archives, self.base_path, decompress=True)
         else:
             print(
                 f"{self.base_path} already exists, we will not download and decompress again"

audio/paddleaudio/datasets/tess.py

Lines changed: 2 additions & 2 deletions

@@ -37,7 +37,7 @@ class TESS(AudioClassificationDataset):
     https://doi.org/10.5683/SP2/E8H2MF
     """

-    archieves = [
+    archives = [
         {
             'url':
             'https://bj.bcebos.com/paddleaudio/datasets/TESS_Toronto_emotional_speech_set.zip',
@@ -93,7 +93,7 @@ def _get_meta_info(self, files) -> List[collections.namedtuple]:
     def _get_data(self, mode, seed, n_folds,
                   split) -> Tuple[List[str], List[int]]:
         if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)):
-            download_and_decompress(self.archieves, DATA_HOME)
+            download_and_decompress(self.archives, DATA_HOME)

         wav_files = []
         for root, _, files in os.walk(os.path.join(DATA_HOME, self.audio_path)):

audio/paddleaudio/datasets/urban_sound.py

Lines changed: 2 additions & 2 deletions

@@ -35,7 +35,7 @@ class UrbanSound8K(AudioClassificationDataset):
     https://dl.acm.org/doi/10.1145/2647868.2655045
     """

-    archieves = [
+    archives = [
         {
             'url':
             'https://zenodo.org/record/1203745/files/UrbanSound8K.tar.gz',
@@ -81,7 +81,7 @@ def _get_meta_info(self):
     def _get_data(self, mode: str, split: int) -> Tuple[List[str], List[int]]:
         if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)) or \
                 not os.path.isfile(os.path.join(DATA_HOME, self.meta)):
-            download_and_decompress(self.archieves, DATA_HOME)
+            download_and_decompress(self.archives, DATA_HOME)

         meta_info = self._get_meta_info()

audio/paddleaudio/datasets/voxceleb.py

Lines changed: 6 additions & 6 deletions

@@ -34,7 +34,7 @@

 class VoxCeleb(Dataset):
     source_url = 'https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/'
-    archieves_audio_dev = [
+    archives_audio_dev = [
         {
             'url': source_url + 'vox1_dev_wav_partaa',
             'md5': 'e395d020928bc15670b570a21695ed96',
@@ -52,13 +52,13 @@ class VoxCeleb(Dataset):
             'md5': '7bb1e9f70fddc7a678fa998ea8b3ba19',
         },
     ]
-    archieves_audio_test = [
+    archives_audio_test = [
         {
             'url': source_url + 'vox1_test_wav.zip',
             'md5': '185fdc63c3c739954633d50379a3d102',
         },
     ]
-    archieves_meta = [
+    archives_meta = [
         {
             'url':
             'https://www.robots.ox.ac.uk/~vgg/data/voxceleb/meta/veri_test2.txt',
@@ -135,11 +135,11 @@ def _get_data(self):
         if not os.path.isdir(self.wav_path):
             print("start to download the voxceleb1 dataset")
             download_and_decompress(  # multi-zip parts concatenate to vox1_dev_wav.zip
-                self.archieves_audio_dev,
+                self.archives_audio_dev,
                 self.base_path,
                 decompress=False)
             download_and_decompress(  # download the vox1_test_wav.zip and unzip
-                self.archieves_audio_test,
+                self.archives_audio_test,
                 self.base_path,
                 decompress=True)

@@ -157,7 +157,7 @@ def _get_data(self):
         if not os.path.isdir(self.meta_path):
             print("prepare the meta data")
             download_and_decompress(
-                self.archieves_meta, self.meta_path, decompress=False)
+                self.archives_meta, self.meta_path, decompress=False)

         # Data preparation.
         if not os.path.isdir(self.csv_path):

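The dev-set parts above are fetched with decompress=False because, as the inline comment notes, the vox1_dev_wav_part* files are slices of a single zip that must be concatenated into vox1_dev_wav.zip before extraction. Below is a minimal sketch of that reassembly step, assuming the parts already sit in base_path; the dataset class's real concatenation code may differ.

import glob
import os
import zipfile

def reassemble_vox1_dev(base_path):
    """Concatenate vox1_dev_wav_part* into vox1_dev_wav.zip, then extract it."""
    parts = sorted(glob.glob(os.path.join(base_path, 'vox1_dev_wav_part*')))
    target = os.path.join(base_path, 'vox1_dev_wav.zip')
    with open(target, 'wb') as out:
        for part in parts:                  # cat partaa partab ... > vox1_dev_wav.zip
            with open(part, 'rb') as f:
                out.write(f.read())
    with zipfile.ZipFile(target) as zf:     # the concatenated file is a valid zip again
        zf.extractall(base_path)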
dataset/chime3_background/chime3_background.py

Lines changed: 1 addition & 1 deletion

@@ -109,7 +109,7 @@ def create_manifest(data_dir, manifest_path):


 def prepare_chime3(url, md5sum, target_dir, manifest_path):
-    """Download, unpack and create summmary manifest file."""
+    """Download, unpack and create summary manifest file."""
     if not os.path.exists(os.path.join(target_dir, "CHiME3")):
         # download
         filepath = download(url, md5sum, target_dir,

dataset/timit/timit.py

Lines changed: 1 addition & 1 deletion

@@ -210,7 +210,7 @@ def create_manifest(data_dir, manifest_path_prefix):


 def prepare_dataset(url, md5sum, target_dir, manifest_path):
-    """Download, unpack and create summmary manifest file.
+    """Download, unpack and create summary manifest file.
     """
     filepath = os.path.join(target_dir, "TIMIT.zip")
     if not os.path.exists(filepath):

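Both corrected docstrings describe the same flow: download the corpus, unpack it, then write a summary manifest. As a rough illustration, such a manifest is usually one JSON object per line; the field names below are hypothetical, and the real schema is defined by the dataset scripts themselves.

import json

def write_manifest(entries, manifest_path):
    """Write one JSON object per line, e.g.
    {'audio_filepath': ..., 'duration': ..., 'text': ...} (hypothetical fields)."""
    with open(manifest_path, 'w', encoding='utf-8') as fout:
        for entry in entries:
            fout.write(json.dumps(entry, ensure_ascii=False) + '\n')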
demos/TTSCppFrontend/src/front/front_interface.cpp

Lines changed: 11 additions & 11 deletions

@@ -115,27 +115,27 @@ int FrontEngineInterface::init() {

     // Build the lexicon (word-to-phoneme mapping)
     if (0 != GenDict(_word2phone_path, &word_phone_map)) {
-        LOG(ERROR) << "Genarate word2phone dict failed";
+        LOG(ERROR) << "Generate word2phone dict failed";
         return -1;
     }

     // Build the phoneme dictionary (phoneme-to-phoneme-id mapping)
     if (0 != GenDict(_phone2id_path, &phone_id_map)) {
-        LOG(ERROR) << "Genarate phone2id dict failed";
+        LOG(ERROR) << "Generate phone2id dict failed";
         return -1;
     }

     // Build the tone dictionary (tone-to-tone-id mapping)
     if (_separate_tone == "true") {
         if (0 != GenDict(_tone2id_path, &tone_id_map)) {
-            LOG(ERROR) << "Genarate tone2id dict failed";
+            LOG(ERROR) << "Generate tone2id dict failed";
             return -1;
         }
     }

     // Build the traditional-to-simplified dictionary (traditional-to-simplified-id mapping)
     if (0 != GenDict(_trand2simp_path, &trand_simp_map)) {
-        LOG(ERROR) << "Genarate trand2simp dict failed";
+        LOG(ERROR) << "Generate trand2simp dict failed";
         return -1;
     }

@@ -263,7 +263,7 @@ int FrontEngineInterface::GetWordsIds(
             if (0 !=
                 GetInitialsFinals(word, &word_initials, &word_finals)) {
                 LOG(ERROR)
-                    << "Genarate the word_initials and word_finals of "
+                    << "Generate the word_initials and word_finals of "
                     << word << " failed";
                 return -1;
             }
@@ -304,7 +304,7 @@ int FrontEngineInterface::GetWordsIds(

         // Phoneme to phoneme id
         if (0 != Phone2Phoneid(phone, phoneids, toneids)) {
-            LOG(ERROR) << "Genarate the phone id of " << word << " failed";
+            LOG(ERROR) << "Generate the phone id of " << word << " failed";
             return -1;
         }
     }
@@ -916,11 +916,11 @@ int FrontEngineInterface::NeuralSandhi(const std::string &word,
         if (find(must_neural_tone_words.begin(),
                  must_neural_tone_words.end(),
                  word) != must_neural_tone_words.end() ||
-            (word_num >= 2 &&
-             find(must_neural_tone_words.begin(),
-                  must_neural_tone_words.end(),
-                  ppspeech::wstring2utf8string(word_wstr.substr(
-                      word_num - 2))) != must_neural_tone_words.end())) {
+            (word_num >= 2 && find(must_neural_tone_words.begin(),
+                                   must_neural_tone_words.end(),
+                                   ppspeech::wstring2utf8string(
+                                       word_wstr.substr(word_num - 2))) !=
+                must_neural_tone_words.end())) {
             (*finals).back() =
                 (*finals).back().replace((*finals).back().length() - 1, 1, "5");
         }

demos/audio_searching/src/milvus_helpers.py

Lines changed: 2 additions & 2 deletions

@@ -77,13 +77,13 @@ def create_collection(self, collection_name):
             field1 = FieldSchema(
                 name="id",
                 dtype=DataType.INT64,
-                descrition="int64",
+                description="int64",
                 is_primary=True,
                 auto_id=True)
             field2 = FieldSchema(
                 name="embedding",
                 dtype=DataType.FLOAT_VECTOR,
-                descrition="speaker embeddings",
+                description="speaker embeddings",
                 dim=VECTOR_DIMENSION,
                 is_primary=False)
             schema = CollectionSchema(

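With the corrected description keyword, the two fields above are combined into a CollectionSchema and then a Collection. The following is a minimal sketch of the surrounding pymilvus calls; the connection endpoint, collection name, and vector dimension are placeholders rather than the demo's actual configuration.

from pymilvus import (Collection, CollectionSchema, DataType, FieldSchema,
                      connections)

VECTOR_DIMENSION = 192          # placeholder; the demo reads this from its config

connections.connect(host="127.0.0.1", port="19530")    # placeholder endpoint

field1 = FieldSchema(name="id", dtype=DataType.INT64, description="int64",
                     is_primary=True, auto_id=True)
field2 = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR,
                     description="speaker embeddings", dim=VECTOR_DIMENSION,
                     is_primary=False)
schema = CollectionSchema(fields=[field1, field2],
                          description="speaker embedding collection")
collection = Collection(name="audio_search_demo", schema=schema)   # placeholder name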
demos/speech_web/speech_server/main.py

Lines changed: 2 additions & 2 deletions

@@ -225,7 +225,7 @@ async def websocket_endpoint_online(websocket: WebSocket):
         websocket (WebSocket): the websocket instance
     """

-    #1. the interface wait to accept the websocket protocal header
+    #1. the interface wait to accept the websocket protocol header
     # and only we receive the header, it establish the connection with specific thread
     await websocket.accept()

@@ -238,7 +238,7 @@ async def websocket_endpoint_online(websocket: WebSocket):
     connection_handler = None

     try:
-        #4. we do a loop to process the audio package by package according the protocal
+        #4. we do a loop to process the audio package by package according the protocol
         # and only if the client send finished signal, we will break the loop
         while True:
             # careful here, changed the source code from starlette.websockets

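The corrected comments spell out the handshake-then-loop protocol: accept the websocket upgrade first, then consume audio package by package until the client sends its finished signal. A stripped-down sketch of that control flow follows; the route path and message fields are illustrative, not the demo server's actual protocol.

from fastapi import FastAPI, WebSocket

app = FastAPI()

@app.websocket("/ws/asr")                       # illustrative route path
async def websocket_endpoint_online(websocket: WebSocket):
    # 1. accept the websocket protocol header to establish the connection
    await websocket.accept()
    # 4. loop over the audio package by package according to the protocol,
    #    breaking only when the client sends its finished signal
    while True:
        message = await websocket.receive()
        if message["type"] == "websocket.disconnect":
            break                               # client went away
        if message.get("text") is not None:     # control messages arrive as text
            if "end" in message["text"]:        # hypothetical finished signal
                await websocket.close()
                break
        elif message.get("bytes") is not None:  # audio chunks arrive as bytes
            chunk = message["bytes"]            # feed `chunk` to the recognizer here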
demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/1/model.py

Lines changed: 1 addition & 1 deletion

@@ -75,7 +75,7 @@ class TritonPythonModel:
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict

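In the Triton Python backend, initialize receives an args dict whose model_config entry is a JSON-serialized model configuration, and this is where per-model state is set up. A minimal hedged sketch is shown below; the output tensor name is a placeholder, not necessarily the one this serving example declares.

import json

import triton_python_backend_utils as pb_utils

class TritonPythonModel:
    def initialize(self, args):
        """Called once when the model is loaded; set up per-model state here."""
        # `args['model_config']` is a JSON string describing the deployed model
        self.model_config = json.loads(args['model_config'])
        # look up one declared output's dtype (the tensor name is a placeholder)
        output_config = pb_utils.get_output_config_by_name(self.model_config,
                                                           "OUTPUT_AUDIO")
        self.output_dtype = pb_utils.triton_string_to_numpy(
            output_config['data_type'])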
examples/aishell3/tts3/conf/conformer.yaml

Lines changed: 3 additions & 3 deletions

@@ -42,7 +42,7 @@ model:
     duration_predictor_layers: 2  # number of layers of duration predictor
     duration_predictor_chans: 256  # number of channels of duration predictor
     duration_predictor_kernel_size: 3  # filter size of duration predictor
-    postnet_layers: 5  # number of layers of postnset
+    postnet_layers: 5  # number of layers of postnet
     postnet_filts: 5  # filter size of conv layers in postnet
     postnet_chans: 256  # number of channels of conv layers in postnet
     encoder_normalize_before: True  # whether to perform layer normalization before the input
@@ -66,14 +66,14 @@ model:
     transformer_dec_attn_dropout_rate: 0.2  # dropout rate for transformer decoder attention layer
     pitch_predictor_layers: 5  # number of conv layers in pitch predictor
     pitch_predictor_chans: 256  # number of channels of conv layers in pitch predictor
-    pitch_predictor_kernel_size: 5  # kernel size of conv leyers in pitch predictor
+    pitch_predictor_kernel_size: 5  # kernel size of conv layers in pitch predictor
     pitch_predictor_dropout: 0.5  # dropout rate in pitch predictor
     pitch_embed_kernel_size: 1  # kernel size of conv embedding layer for pitch
     pitch_embed_dropout: 0.0  # dropout rate after conv embedding layer for pitch
     stop_gradient_from_pitch_predictor: true  # whether to stop the gradient from pitch predictor to encoder
     energy_predictor_layers: 2  # number of conv layers in energy predictor
     energy_predictor_chans: 256  # number of channels of conv layers in energy predictor
-    energy_predictor_kernel_size: 3  # kernel size of conv leyers in energy predictor
+    energy_predictor_kernel_size: 3  # kernel size of conv layers in energy predictor
     energy_predictor_dropout: 0.5  # dropout rate in energy predictor
     energy_embed_kernel_size: 1  # kernel size of conv embedding layer for energy
     energy_embed_dropout: 0.0  # dropout rate after conv embedding layer for energy

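The corrected comments refer to the conv layers inside the pitch and energy predictors; the *_predictor_layers, *_predictor_chans, *_predictor_kernel_size, and *_predictor_dropout keys size exactly that stack, here and in the default.yaml configs that follow. Below is a rough paddle sketch of how such a variance predictor is commonly assembled from these hyperparameters, mirroring the general FastSpeech2 recipe rather than PaddleSpeech's exact module.

import paddle.nn as nn

class VariancePredictorSketch(nn.Layer):
    """Conv1D stack sized by the *_predictor_* keys above (illustrative only)."""

    def __init__(self, idim=384, n_layers=5, n_chans=256,
                 kernel_size=5, dropout_rate=0.5):
        super().__init__()
        layers = []
        for i in range(n_layers):
            in_chans = idim if i == 0 else n_chans
            layers += [
                nn.Conv1D(in_chans, n_chans, kernel_size,
                          padding=(kernel_size - 1) // 2),
                nn.ReLU(),
                nn.LayerNorm(n_chans),           # normalizes the channel dim (see forward)
                nn.Dropout(dropout_rate),
            ]
        self.convs = nn.LayerList(layers)
        self.linear = nn.Linear(n_chans, 1)      # one scalar (pitch/energy) per frame

    def forward(self, xs):                       # xs: (batch, time, idim)
        xs = xs.transpose([0, 2, 1])             # Conv1D expects (batch, channels, time)
        for layer in self.convs:
            if isinstance(layer, nn.LayerNorm):  # LayerNorm acts on the last dim
                xs = layer(xs.transpose([0, 2, 1])).transpose([0, 2, 1])
            else:
                xs = layer(xs)
        return self.linear(xs.transpose([0, 2, 1]))  # (batch, time, 1)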
examples/aishell3/tts3/conf/default.yaml

Lines changed: 3 additions & 3 deletions

@@ -42,7 +42,7 @@ model:
     duration_predictor_layers: 2  # number of layers of duration predictor
     duration_predictor_chans: 256  # number of channels of duration predictor
     duration_predictor_kernel_size: 3  # filter size of duration predictor
-    postnet_layers: 5  # number of layers of postnset
+    postnet_layers: 5  # number of layers of postnet
     postnet_filts: 5  # filter size of conv layers in postnet
     postnet_chans: 256  # number of channels of conv layers in postnet
     use_scaled_pos_enc: True  # whether to use scaled positional encoding
@@ -60,14 +60,14 @@ model:
     transformer_dec_attn_dropout_rate: 0.2  # dropout rate for transformer decoder attention layer
     pitch_predictor_layers: 5  # number of conv layers in pitch predictor
     pitch_predictor_chans: 256  # number of channels of conv layers in pitch predictor
-    pitch_predictor_kernel_size: 5  # kernel size of conv leyers in pitch predictor
+    pitch_predictor_kernel_size: 5  # kernel size of conv layers in pitch predictor
     pitch_predictor_dropout: 0.5  # dropout rate in pitch predictor
     pitch_embed_kernel_size: 1  # kernel size of conv embedding layer for pitch
     pitch_embed_dropout: 0.0  # dropout rate after conv embedding layer for pitch
     stop_gradient_from_pitch_predictor: True  # whether to stop the gradient from pitch predictor to encoder
     energy_predictor_layers: 2  # number of conv layers in energy predictor
     energy_predictor_chans: 256  # number of channels of conv layers in energy predictor
-    energy_predictor_kernel_size: 3  # kernel size of conv leyers in energy predictor
+    energy_predictor_kernel_size: 3  # kernel size of conv layers in energy predictor
     energy_predictor_dropout: 0.5  # dropout rate in energy predictor
     energy_embed_kernel_size: 1  # kernel size of conv embedding layer for energy
     energy_embed_dropout: 0.0  # dropout rate after conv embedding layer for energy

examples/aishell3/vc1/conf/default.yaml

Lines changed: 3 additions & 3 deletions

@@ -42,7 +42,7 @@ model:
     duration_predictor_layers: 2  # number of layers of duration predictor
     duration_predictor_chans: 256  # number of channels of duration predictor
     duration_predictor_kernel_size: 3  # filter size of duration predictor
-    postnet_layers: 5  # number of layers of postnset
+    postnet_layers: 5  # number of layers of postnet
     postnet_filts: 5  # filter size of conv layers in postnet
     postnet_chans: 256  # number of channels of conv layers in postnet
     use_scaled_pos_enc: True  # whether to use scaled positional encoding
@@ -60,14 +60,14 @@ model:
     transformer_dec_attn_dropout_rate: 0.2  # dropout rate for transformer decoder attention layer
     pitch_predictor_layers: 5  # number of conv layers in pitch predictor
     pitch_predictor_chans: 256  # number of channels of conv layers in pitch predictor
-    pitch_predictor_kernel_size: 5  # kernel size of conv leyers in pitch predictor
+    pitch_predictor_kernel_size: 5  # kernel size of conv layers in pitch predictor
     pitch_predictor_dropout: 0.5  # dropout rate in pitch predictor
     pitch_embed_kernel_size: 1  # kernel size of conv embedding layer for pitch
     pitch_embed_dropout: 0.0  # dropout rate after conv embedding layer for pitch
     stop_gradient_from_pitch_predictor: True  # whether to stop the gradient from pitch predictor to encoder
     energy_predictor_layers: 2  # number of conv layers in energy predictor
     energy_predictor_chans: 256  # number of channels of conv layers in energy predictor
-    energy_predictor_kernel_size: 3  # kernel size of conv leyers in energy predictor
+    energy_predictor_kernel_size: 3  # kernel size of conv layers in energy predictor
     energy_predictor_dropout: 0.5  # dropout rate in energy predictor
     energy_embed_kernel_size: 1  # kernel size of conv embedding layer for energy
     energy_embed_dropout: 0.0  # dropout rate after conv embedding layer for energy

examples/aishell3/vc2/conf/default.yaml

Lines changed: 3 additions & 3 deletions

@@ -42,7 +42,7 @@ model:
     duration_predictor_layers: 2  # number of layers of duration predictor
     duration_predictor_chans: 256  # number of channels of duration predictor
     duration_predictor_kernel_size: 3  # filter size of duration predictor
-    postnet_layers: 5  # number of layers of postnset
+    postnet_layers: 5  # number of layers of postnet
     postnet_filts: 5  # filter size of conv layers in postnet
     postnet_chans: 256  # number of channels of conv layers in postnet
     use_scaled_pos_enc: True  # whether to use scaled positional encoding
@@ -60,14 +60,14 @@ model:
     transformer_dec_attn_dropout_rate: 0.2  # dropout rate for transformer decoder attention layer
     pitch_predictor_layers: 5  # number of conv layers in pitch predictor
    pitch_predictor_chans: 256  # number of channels of conv layers in pitch predictor
-    pitch_predictor_kernel_size: 5  # kernel size of conv leyers in pitch predictor
+    pitch_predictor_kernel_size: 5  # kernel size of conv layers in pitch predictor
     pitch_predictor_dropout: 0.5  # dropout rate in pitch predictor
     pitch_embed_kernel_size: 1  # kernel size of conv embedding layer for pitch
     pitch_embed_dropout: 0.0  # dropout rate after conv embedding layer for pitch
     stop_gradient_from_pitch_predictor: True  # whether to stop the gradient from pitch predictor to encoder
     energy_predictor_layers: 2  # number of conv layers in energy predictor
     energy_predictor_chans: 256  # number of channels of conv layers in energy predictor
-    energy_predictor_kernel_size: 3  # kernel size of conv leyers in energy predictor
+    energy_predictor_kernel_size: 3  # kernel size of conv layers in energy predictor
     energy_predictor_dropout: 0.5  # dropout rate in energy predictor
     energy_embed_kernel_size: 1  # kernel size of conv embedding layer for energy
     energy_embed_dropout: 0.0  # dropout rate after conv embedding layer for energy
