Mirror of https://github.com/neodyland/sbv2-api.git, synced 2026-01-06 06:22:57 +00:00.

Compare commits: v0.2.0-alp...v0.2.0-rc (114 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | cc664fae2d |  |
|  | 71ec658772 |  |
|  | dff939091c |  |
|  | 8a28a4e7a5 |  |
|  | 21f845a799 |  |
|  | 4ff9a38a80 |  |
|  | 20cc0573b5 |  |
|  | 4b932d568d |  |
|  | 6237cd0fec |  |
|  | 35fabdf681 |  |
|  | f09343c97f |  |
|  | f2570d89d0 |  |
|  | ac2a09d6af |  |
|  | c6eaf9cb9f |  |
|  | f2395096ca |  |
|  | 3f6f4ccb6f |  |
|  | 67eba8ee6c |  |
|  | 0aa1bc8733 |  |
|  | d1970d99be |  |
|  | fddb35e592 |  |
|  | e26715c809 |  |
|  | 26aa4b7df0 |  |
|  | de18846280 |  |
|  | 38c2e69648 |  |
|  | 593dbaf19d |  |
|  | bf44b07be1 |  |
|  | 102a8eb065 |  |
|  | 68edb3187f |  |
|  | 4a81a06faf |  |
|  | caf541ef65 |  |
|  | 05c3846b7b |  |
|  | 1b2054c4b8 |  |
|  | a7fbfa2017 |  |
|  | db09b73b32 |  |
|  | 843ef36148 |  |
|  | aa7fc2e3b0 |  |
|  | fc4a79c111 |  |
|  | 4db7f49fa5 |  |
|  | edee0710aa |  |
|  | 9bcbd496e5 |  |
|  | 90b3ba2e40 |  |
|  | 9ceec03bd0 |  |
|  | 5e9df65656 |  |
|  | 2eda2fe9ca |  |
|  | 9c9119a107 |  |
|  | 2c1a1dffc0 |  |
|  | ed7bf53b89 |  |
|  | 4375df2689 |  |
|  | 789cef74ce |  |
|  | 5b403a2255 |  |
|  | 62653ec1c3 |  |
|  | 83076227e7 |  |
|  | f90904a337 |  |
|  | 4e0c8591cd |  |
|  | 997b562682 |  |
|  | fbd62315d0 |  |
|  | 060af0c187 |  |
|  | b76738f467 |  |
|  | 8598167114 |  |
|  | 001f61bb6a |  |
|  | 9b9962ed29 |  |
|  | b414d22a3b |  |
|  | 248363ae4a |  |
|  | c4b61a36db |  |
|  | 35d16d88a8 |  |
|  | fe48d6a034 |  |
|  | bca4b2053f |  |
|  | 3330242cd8 |  |
|  | f10f71f29b |  |
|  | 7bd39b7182 |  |
|  | 2d557fb0ee |  |
|  | 14d631eeaa |  |
|  | 380daf479c |  |
|  | cb814a9952 |  |
|  | 795caf626c |  |
|  | fb32357f31 |  |
|  | e4010b3b83 |  |
|  | 17244a9ede |  |
|  | 61b04fd3d7 |  |
|  | 4e57a22a40 |  |
|  | 8e10057882 |  |
|  | 0222b9a189 |  |
|  | 5e96d5aef7 |  |
|  | 234120f510 |  |
|  | 08f7ab88ec |  |
|  | 005c67c9b6 |  |
|  | cb08b5b582 |  |
|  | 105b3ce8de |  |
|  | 78a5016abc |  |
|  | 7e6bd4ad0a |  |
|  | e1c6cd04b7 |  |
|  | a15efdff09 |  |
|  | 21823721d0 |  |
|  | aad978be4b |  |
|  | 6dd2cbd991 |  |
|  | d7b76cc207 |  |
|  | ae0ccb29d2 |  |
|  | 4bcde2e4b4 |  |
|  | 2356c896f6 |  |
|  | d5445abeee |  |
|  | 673ec0067d |  |
|  | 74f657cb33 |  |
|  | 08be778cc5 |  |
|  | 6da2f5a0bb |  |
|  | 107190765f |  |
|  | df726e6f7b |  |
|  | e5b1ccc36b |  |
|  | 40cb604c57 |  |
|  | 9152c80c76 |  |
|  | 574092562e |  |
|  | 2e931adce7 |  |
|  | e36c395db1 |  |
|  | cfe88629ab |  |
|  | 30a98f0968 |  |
.github/workflows/CI.yml (vendored, 9 lines changed)
@@ -79,8 +79,6 @@ jobs:
     strategy:
       matrix:
         platform:
-          - runner: macos-12
-            target: x86_64
           - runner: macos-14
             target: aarch64
     steps:
@@ -132,7 +130,7 @@ jobs:
           args: --non-interactive --skip-existing wheels-*/*
 
   push-docker:
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.platform }}
     if: "startsWith(github.ref, 'refs/tags/')"
     permissions:
       contents: read
@@ -140,9 +138,7 @@ jobs:
     strategy:
       matrix:
         tag: [cpu, cuda]
-        platform:
-          - linux/amd64
-          - linux/arm64
+        platform: [ubuntu-latest, ubuntu-24.04-arm]
     steps:
       - uses: actions/checkout@v4
       - name: Set up QEMU
@@ -163,4 +159,3 @@ jobs:
           tags: |
             ghcr.io/${{ github.repository }}:${{ matrix.tag }}
           file: docker/${{ matrix.tag }}.Dockerfile
-          platforms: ${{ matrix.platform }}
Cargo.lock (generated, 598 lines changed): file diff suppressed because it is too large.
@@ -94,7 +94,7 @@ model = get_net_g(
 )
 
 
-def forward(x, x_len, sid, tone, lang, bert, style, length_scale, sdp_ratio):
+def forward(x, x_len, sid, tone, lang, bert, style, length_scale, sdp_ratio, noise_scale, noise_scale_w):
     return model.infer(
         x,
         x_len,
@@ -105,6 +105,8 @@ def forward(x, x_len, sid, tone, lang, bert, style, length_scale, sdp_ratio):
         style,
         sdp_ratio=sdp_ratio,
         length_scale=length_scale,
+        noise_scale=noise_scale,
+        noise_scale_w=noise_scale_w,
     )
 
 
@@ -122,6 +124,8 @@ torch.onnx.export(
         style_vec_tensor,
         torch.tensor(1.0),
         torch.tensor(0.0),
+        torch.tensor(0.6777),
+        torch.tensor(0.8),
     ),
     f"../models/model_{out_name}.onnx",
     verbose=True,
@@ -144,6 +148,8 @@ torch.onnx.export(
         "style_vec",
         "length_scale",
         "sdp_ratio",
+        "noise_scale",
+        "noise_scale_w"
     ],
     output_names=["output"],
 )
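The export above adds two new graph inputs, noise_scale and noise_scale_w, traced with the values 0.6777 and 0.8. A quick way to check that a re-exported model actually exposes them is to list the session inputs with the ort crate that sbv2_core already depends on. A minimal sketch, assuming ort's 2.0 release-candidate builder API and a placeholder model path:

```rust
use ort::Session;

fn main() -> anyhow::Result<()> {
    // Placeholder path; point this at a model exported with the updated script.
    let session = Session::builder()?.commit_from_file("models/model_example.onnx")?;
    for input in &session.inputs {
        // Expect "noise_scale" and "noise_scale_w" alongside the existing inputs
        // ("x_tst", "x_tst_lengths", "sid", "tones", "language", "bert",
        //  "style_vec", "sdp_ratio", "length_scale").
        println!("{}", input.name);
    }
    Ok(())
}
```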
@@ -1,6 +1,6 @@
 [package]
 name = "sbv2_api"
-version = "0.2.0-alpha"
+version = "0.2.0-alpha4"
 edition = "2021"
 
 [dependencies]
@@ -9,7 +9,7 @@ axum = "0.7.5"
 dotenvy.workspace = true
 env_logger.workspace = true
 log = "0.4.22"
-sbv2_core = { version = "0.2.0-alpha2", path = "../sbv2_core" }
+sbv2_core = { version = "0.2.0-alpha2", path = "../sbv2_core", features = ["aivmx"] }
 serde = { version = "1.0.210", features = ["derive"] }
 tokio = { version = "1.40.0", features = ["full"] }
 utoipa = { version = "5.0.0", features = ["axum_extras"] }
@@ -40,6 +40,14 @@ fn length_default() -> f32 {
     1.0
 }
 
+fn style_id_default() -> i32 {
+    0
+}
+
+fn speaker_id_default() -> i64 {
+    0
+}
+
 #[derive(Deserialize, ToSchema)]
 struct SynthesizeRequest {
     text: String,
@@ -48,6 +56,10 @@ struct SynthesizeRequest {
     sdp_ratio: f32,
     #[serde(default = "length_default")]
     length_scale: f32,
+    #[serde(default = "style_id_default")]
+    style_id: i32,
+    #[serde(default = "speaker_id_default")]
+    speaker_id: i64,
 }
 
 #[utoipa::path(
@@ -65,15 +77,18 @@ async fn synthesize(
         ident,
         sdp_ratio,
         length_scale,
+        style_id,
+        speaker_id,
     }): Json<SynthesizeRequest>,
 ) -> AppResult<impl IntoResponse> {
     log::debug!("processing request: text={text}, ident={ident}, sdp_ratio={sdp_ratio}, length_scale={length_scale}");
     let buffer = {
-        let tts_model = state.tts_model.lock().await;
+        let mut tts_model = state.tts_model.lock().await;
         tts_model.easy_synthesize(
             &ident,
             &text,
-            0,
+            style_id,
+            speaker_id,
             SynthesizeOptions {
                 sdp_ratio,
                 length_scale,
@@ -94,6 +109,9 @@ impl AppState {
         let mut tts_model = TTSModelHolder::new(
             &fs::read(env::var("BERT_MODEL_PATH")?).await?,
             &fs::read(env::var("TOKENIZER_PATH")?).await?,
+            env::var("HOLDER_MAX_LOADED_MODElS")
+                .ok()
+                .and_then(|x| x.parse().ok()),
         )?;
         let models = env::var("MODELS_PATH").unwrap_or("models".to_string());
         let mut f = fs::read_dir(&models).await?;
@@ -122,6 +140,20 @@ impl AppState {
                     log::warn!("Error loading {entry}: {e}");
                 };
                 log::info!("Loaded: {entry}");
+            } else if name.ends_with(".aivmx") {
+                let entry = &name[..name.len() - 6];
+                log::info!("Try loading: {entry}");
+                let aivmx_bytes = match fs::read(format!("{models}/{entry}.aivmx")).await {
+                    Ok(b) => b,
+                    Err(e) => {
+                        log::warn!("Error loading aivmx bytes from file {entry}: {e}");
+                        continue;
+                    }
+                };
+                if let Err(e) = tts_model.load_aivmx(entry, aivmx_bytes) {
+                    log::error!("Error loading {entry}: {e}");
+                }
+                log::info!("Loaded: {entry}");
             }
         }
         for entry in entries {
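With the two new optional request fields in place, a /synthesize body can pin a style and a speaker explicitly; both fall back to 0 when omitted, matching the serde defaults above. A minimal sketch of the JSON shape, built with serde_json purely for illustration (the text, ident, and numeric values are placeholders):

```rust
use serde_json::json;

fn main() {
    // Field names follow the SynthesizeRequest struct; style_id and speaker_id
    // are the new fields and default to 0 on the server when left out.
    let body = json!({
        "text": "こんにちは",
        "ident": "some-model",   // placeholder model identifier
        "sdp_ratio": 0.25,
        "length_scale": 1.0,
        "style_id": 0,
        "speaker_id": 0
    });
    println!("{body}");
}
```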
@@ -1,6 +1,6 @@
 [package]
 name = "sbv2_bindings"
-version = "0.2.0-alpha2"
+version = "0.2.0-alpha4"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,5 +11,5 @@ crate-type = ["cdylib"]
 [dependencies]
 anyhow.workspace = true
 ndarray.workspace = true
-pyo3 = { version = "0.22.0", features = ["anyhow"] }
+pyo3 = { version = "0.23.0", features = ["anyhow"] }
 sbv2_core = { version = "0.2.0-alpha2", path = "../sbv2_core" }
@@ -23,10 +23,15 @@ pub struct TTSModel {
 
 #[pymethods]
 impl TTSModel {
+    #[pyo3(signature = (bert_model_bytes, tokenizer_bytes, max_loaded_models=None))]
     #[new]
-    fn new(bert_model_bytes: Vec<u8>, tokenizer_bytes: Vec<u8>) -> anyhow::Result<Self> {
+    fn new(
+        bert_model_bytes: Vec<u8>,
+        tokenizer_bytes: Vec<u8>,
+        max_loaded_models: Option<usize>,
+    ) -> anyhow::Result<Self> {
         Ok(Self {
-            model: TTSModelHolder::new(bert_model_bytes, tokenizer_bytes)?,
+            model: TTSModelHolder::new(bert_model_bytes, tokenizer_bytes, max_loaded_models)?,
         })
     }
 
@@ -38,10 +43,21 @@ impl TTSModel {
     /// BERTモデルのパス
     /// tokenizer_path : str
     /// トークナイザーのパス
+    /// max_loaded_models: int | None
+    /// 同時にVRAMに存在するモデルの数
+    #[pyo3(signature = (bert_model_path, tokenizer_path, max_loaded_models=None))]
     #[staticmethod]
-    fn from_path(bert_model_path: String, tokenizer_path: String) -> anyhow::Result<Self> {
+    fn from_path(
+        bert_model_path: String,
+        tokenizer_path: String,
+        max_loaded_models: Option<usize>,
+    ) -> anyhow::Result<Self> {
         Ok(Self {
-            model: TTSModelHolder::new(fs::read(bert_model_path)?, fs::read(tokenizer_path)?)?,
+            model: TTSModelHolder::new(
+                fs::read(bert_model_path)?,
+                fs::read(tokenizer_path)?,
+                max_loaded_models,
+            )?,
         })
     }
 
@@ -121,11 +137,12 @@ impl TTSModel {
     /// voice_data : bytes
     /// 音声データ
     fn synthesize<'p>(
-        &'p self,
+        &'p mut self,
         py: Python<'p>,
         text: String,
         ident: String,
         style_id: i32,
+        speaker_id: i64,
         sdp_ratio: f32,
         length_scale: f32,
     ) -> anyhow::Result<Bound<PyBytes>> {
@@ -133,13 +150,14 @@ impl TTSModel {
             ident.as_str(),
             &text,
             style_id,
+            speaker_id,
             SynthesizeOptions {
                 sdp_ratio,
                 length_scale,
                 ..Default::default()
             },
         )?;
-        Ok(PyBytes::new_bound(py, &data))
+        Ok(PyBytes::new(py, &data))
     }
 
     fn unload(&mut self, ident: String) -> bool {
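The bump to pyo3 0.23 is what drives the PyBytes::new_bound to PyBytes::new rename above: in that release the Bound-API constructors drop their _bound suffix. A standalone sketch of the same pattern (the function below is hypothetical and not part of the bindings):

```rust
use pyo3::prelude::*;
use pyo3::types::PyBytes;

// Hypothetical helper: with pyo3 0.23, PyBytes::new returns Bound<'py, PyBytes>,
// which is the type the synthesize method above now hands back to Python.
#[pyfunction]
fn to_bytes(py: Python<'_>, data: Vec<u8>) -> Bound<'_, PyBytes> {
    PyBytes::new(py, &data)
}
```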
@@ -1,7 +1,7 @@
 [package]
 name = "sbv2_core"
 description = "Style-Bert-VITSの推論ライブラリ"
-version = "0.2.0-alpha2"
+version = "0.2.0-alpha4"
 edition = "2021"
 license = "MIT"
 readme = "../README.md"
@@ -10,20 +10,22 @@ documentation = "https://docs.rs/sbv2_core"
 
 [dependencies]
 anyhow.workspace = true
+base64 = { version = "0.22.1", optional = true }
 dotenvy.workspace = true
 env_logger.workspace = true
 hound = "3.5.1"
 jpreprocess = { version = "0.10.0", features = ["naist-jdic"] }
 ndarray.workspace = true
+npyz = { version = "0.8.3", optional = true }
 num_cpus = "1.16.0"
 once_cell.workspace = true
-ort = { git = "https://github.com/pykeio/ort.git", version = "2.0.0-rc.6", optional = true }
+ort = { git = "https://github.com/pykeio/ort.git", version = "2.0.0-rc.8", optional = true }
 regex = "1.10.6"
 serde = { version = "1.0.210", features = ["derive"] }
 serde_json = "1.0.128"
 tar = "0.4.41"
 thiserror = "1.0.63"
-tokenizers = { version = "0.20.0", default-features = false }
+tokenizers = { version = "0.21.0", default-features = false }
 zstd = "0.13.2"
 
 [features]
@@ -35,4 +37,6 @@ directml = ["ort/directml", "std"]
 tensorrt = ["ort/tensorrt", "std"]
 coreml = ["ort/coreml", "std"]
 default = ["std"]
 no_std = ["tokenizers/unstable_wasm"]
+aivmx = ["npyz", "base64"]
+base64 = ["dep:base64"]
@@ -21,6 +21,9 @@ pub enum Error {
     HoundError(#[from] hound::Error),
     #[error("model not found error")]
     ModelNotFoundError(String),
+    #[cfg(feature = "base64")]
+    #[error("base64 error")]
+    Base64Error(#[from] base64::DecodeError),
     #[error("other")]
     OtherError(String),
 }
@@ -11,14 +11,26 @@ fn main_inner() -> anyhow::Result<()> {
     let mut tts_holder = tts::TTSModelHolder::new(
         &fs::read(env::var("BERT_MODEL_PATH")?)?,
         &fs::read(env::var("TOKENIZER_PATH")?)?,
+        env::var("HOLDER_MAX_LOADED_MODElS")
+            .ok()
+            .and_then(|x| x.parse().ok()),
     )?;
-    tts_holder.load_sbv2file(ident, fs::read(env::var("MODEL_PATH")?)?)?;
-    let audio = tts_holder.easy_synthesize(ident, &text, 0, tts::SynthesizeOptions::default())?;
+    #[cfg(not(feature = "aivmx"))]
+    {
+        tts_holder.load_sbv2file(ident, fs::read(env::var("MODEL_PATH")?)?)?;
+    }
+    #[cfg(feature = "aivmx")]
+    {
+        tts_holder.load_aivmx(ident, fs::read(env::var("MODEL_PATH")?)?)?;
+    }
+
+    let audio =
+        tts_holder.easy_synthesize(ident, &text, 0, 0, tts::SynthesizeOptions::default())?;
     fs::write("output.wav", audio)?;
 
     Ok(())
 }
 
 #[cfg(not(feature = "std"))]
 fn main_inner() -> anyhow::Result<()> {
     Ok(())
@@ -52,11 +52,14 @@ pub fn synthesize(
     session: &Session,
     bert_ori: Array2<f32>,
     x_tst: Array1<i64>,
+    sid: Array1<i64>,
     tones: Array1<i64>,
     lang_ids: Array1<i64>,
     style_vector: Array1<f32>,
     sdp_ratio: f32,
     length_scale: f32,
+    noise_scale: f32,
+    noise_scale_w: f32,
 ) -> Result<Array3<f32>> {
     let bert = bert_ori.insert_axis(Axis(0));
     let x_tst_lengths: Array1<i64> = array![x_tst.shape()[0] as i64];
@@ -67,13 +70,15 @@ pub fn synthesize(
     let outputs = session.run(ort::inputs! {
         "x_tst" => x_tst,
         "x_tst_lengths" => x_tst_lengths,
-        "sid" => array![0_i64],
+        "sid" => sid,
         "tones" => tones,
         "language" => lang_ids,
         "bert" => bert,
         "style_vec" => style_vector,
         "sdp_ratio" => array![sdp_ratio],
         "length_scale" => array![length_scale],
+        "noise_scale" => array![noise_scale],
+        "noise_scale_w" => array![noise_scale_w]
     }?)?;
 
     let audio_array = outputs["output"]
@@ -1,7 +1,13 @@
 use crate::error::{Error, Result};
 use crate::{jtalk, model, style, tokenizer, tts_util};
+#[cfg(feature = "aivmx")]
+use base64::prelude::{Engine as _, BASE64_STANDARD};
+#[cfg(feature = "aivmx")]
+use ndarray::ShapeBuilder;
 use ndarray::{concatenate, Array1, Array2, Array3, Axis};
 use ort::Session;
+#[cfg(feature = "aivmx")]
+use std::io::Cursor;
 use tokenizers::Tokenizer;
 
 #[derive(PartialEq, Eq, Clone)]
@@ -24,9 +30,10 @@ where
 }
 
 pub struct TTSModel {
-    vits2: Session,
+    vits2: Option<Session>,
     style_vectors: Array2<f32>,
     ident: TTSIdent,
+    bytes: Option<Vec<u8>>,
 }
 
 /// High-level Style-Bert-VITS2's API
@@ -35,6 +42,7 @@ pub struct TTSModelHolder {
     bert: Session,
     models: Vec<TTSModel>,
     jtalk: jtalk::JTalk,
+    max_loaded_models: Option<usize>,
 }
 
 impl TTSModelHolder {
@@ -43,9 +51,13 @@ impl TTSModelHolder {
     /// # Examples
     ///
     /// ```rs
-    /// let mut tts_holder = TTSModelHolder::new(std::fs::read("deberta.onnx")?, std::fs::read("tokenizer.json")?)?;
+    /// let mut tts_holder = TTSModelHolder::new(std::fs::read("deberta.onnx")?, std::fs::read("tokenizer.json")?, None)?;
     /// ```
-    pub fn new<P: AsRef<[u8]>>(bert_model_bytes: P, tokenizer_bytes: P) -> Result<Self> {
+    pub fn new<P: AsRef<[u8]>>(
+        bert_model_bytes: P,
+        tokenizer_bytes: P,
+        max_loaded_models: Option<usize>,
+    ) -> Result<Self> {
         let bert = model::load_model(bert_model_bytes, true)?;
         let jtalk = jtalk::JTalk::new()?;
         let tokenizer = tokenizer::get_tokenizer(tokenizer_bytes)?;
@@ -54,6 +66,7 @@ impl TTSModelHolder {
             models: vec![],
             jtalk,
             tokenizer,
+            max_loaded_models,
         })
     }
 
@@ -62,6 +75,53 @@ impl TTSModelHolder {
         self.models.iter().map(|m| m.ident.to_string()).collect()
     }
 
+    #[cfg(feature = "aivmx")]
+    pub fn load_aivmx<I: Into<TTSIdent>, P: AsRef<[u8]>>(
+        &mut self,
+        ident: I,
+        aivmx_bytes: P,
+    ) -> Result<()> {
+        let ident = ident.into();
+        if self.find_model(ident.clone()).is_err() {
+            let mut load = true;
+            if let Some(max) = self.max_loaded_models {
+                if self.models.iter().filter(|x| x.vits2.is_some()).count() >= max {
+                    load = false;
+                }
+            }
+            let model = model::load_model(&aivmx_bytes, false)?;
+            let metadata = model.metadata()?;
+            if let Some(aivm_style_vectors) = metadata.custom("aivm_style_vectors")? {
+                let aivm_style_vectors = BASE64_STANDARD.decode(aivm_style_vectors)?;
+                let style_vectors = Cursor::new(&aivm_style_vectors);
+                let reader = npyz::NpyFile::new(style_vectors)?;
+                let style_vectors = {
+                    let shape = reader.shape().to_vec();
+                    let order = reader.order();
+                    let data = reader.into_vec::<f32>()?;
+                    let shape = match shape[..] {
+                        [i1, i2] => [i1 as usize, i2 as usize],
+                        _ => panic!("expected 2D array"),
+                    };
+                    let true_shape = shape.set_f(order == npyz::Order::Fortran);
+                    ndarray::Array2::from_shape_vec(true_shape, data)?
+                };
+                drop(metadata);
+                self.models.push(TTSModel {
+                    vits2: if load { Some(model) } else { None },
+                    bytes: if self.max_loaded_models.is_some() {
+                        Some(aivmx_bytes.as_ref().to_vec())
+                    } else {
+                        None
+                    },
+                    ident,
+                    style_vectors,
+                })
+            }
+        }
+        Ok(())
+    }
+
     /// Load a .sbv2 file binary
     ///
     /// # Examples
@@ -94,10 +154,25 @@ impl TTSModelHolder {
     ) -> Result<()> {
         let ident = ident.into();
         if self.find_model(ident.clone()).is_err() {
+            let mut load = true;
+            if let Some(max) = self.max_loaded_models {
+                if self.models.iter().filter(|x| x.vits2.is_some()).count() >= max {
+                    load = false;
+                }
+            }
             self.models.push(TTSModel {
-                vits2: model::load_model(vits2_bytes, false)?,
+                vits2: if load {
+                    Some(model::load_model(&vits2_bytes, false)?)
+                } else {
+                    None
+                },
                 style_vectors: style::load_style(style_vectors_bytes)?,
                 ident,
+                bytes: if self.max_loaded_models.is_some() {
+                    Some(vits2_bytes.as_ref().to_vec())
+                } else {
+                    None
+                },
             })
         }
         Ok(())
@@ -145,6 +220,42 @@ impl TTSModelHolder {
             .find(|m| m.ident == ident)
             .ok_or(Error::ModelNotFoundError(ident.to_string()))
     }
+    fn find_and_load_model<I: Into<TTSIdent>>(&mut self, ident: I) -> Result<bool> {
+        let ident = ident.into();
+        let (bytes, style_vectors) = {
+            let model = self
+                .models
+                .iter()
+                .find(|m| m.ident == ident)
+                .ok_or(Error::ModelNotFoundError(ident.to_string()))?;
+            if model.vits2.is_some() {
+                return Ok(true);
+            }
+            (model.bytes.clone().unwrap(), model.style_vectors.clone())
+        };
+        self.unload(ident.clone());
+        let s = model::load_model(&bytes, false)?;
+        if let Some(max) = self.max_loaded_models {
+            if self.models.iter().filter(|x| x.vits2.is_some()).count() >= max {
+                self.unload(self.models.first().unwrap().ident.clone());
+            }
+        }
+        self.models.push(TTSModel {
+            bytes: Some(bytes.to_vec()),
+            vits2: Some(s),
+            style_vectors,
+            ident: ident.clone(),
+        });
+        let model = self
+            .models
+            .iter()
+            .find(|m| m.ident == ident)
+            .ok_or(Error::ModelNotFoundError(ident.to_string()))?;
+        if model.vits2.is_some() {
+            return Ok(true);
+        }
+        Err(Error::ModelNotFoundError(ident.to_string()))
+    }
 
     /// Get style vector by style id and weight
     ///
@@ -167,12 +278,19 @@ impl TTSModelHolder {
     /// let audio = tts_holder.easy_synthesize("tsukuyomi", "こんにちは", 0, SynthesizeOptions::default())?;
     /// ```
     pub fn easy_synthesize<I: Into<TTSIdent> + Copy>(
-        &self,
+        &mut self,
         ident: I,
         text: &str,
         style_id: i32,
+        speaker_id: i64,
         options: SynthesizeOptions,
     ) -> Result<Vec<u8>> {
+        self.find_and_load_model(ident)?;
+        let vits2 = &self
+            .find_model(ident)?
+            .vits2
+            .as_ref()
+            .ok_or(Error::ModelNotFoundError(ident.into().to_string()))?;
         let style_vector = self.get_style_vector(ident, style_id, options.style_weight)?;
         let audio_array = if options.split_sentences {
             let texts: Vec<&str> = text.split('\n').collect();
@@ -183,14 +301,17 @@ impl TTSModelHolder {
                 }
                 let (bert_ori, phones, tones, lang_ids) = self.parse_text(t)?;
                 let audio = model::synthesize(
-                    &self.find_model(ident)?.vits2,
+                    vits2,
                     bert_ori.to_owned(),
                     phones,
+                    Array1::from_vec(vec![speaker_id]),
                     tones,
                     lang_ids,
                     style_vector.clone(),
                    options.sdp_ratio,
                    options.length_scale,
+                    0.677,
+                    0.8,
                 )?;
                 audios.push(audio.clone());
                 if i != texts.len() - 1 {
@@ -204,47 +325,21 @@ impl TTSModelHolder {
         } else {
             let (bert_ori, phones, tones, lang_ids) = self.parse_text(text)?;
             model::synthesize(
-                &self.find_model(ident)?.vits2,
+                vits2,
                 bert_ori.to_owned(),
                 phones,
+                Array1::from_vec(vec![speaker_id]),
                 tones,
                 lang_ids,
                 style_vector,
                 options.sdp_ratio,
                 options.length_scale,
+                0.677,
+                0.8,
             )?
         };
         tts_util::array_to_vec(audio_array)
     }
-
-    /// Synthesize text to audio
-    ///
-    /// # Note
-    /// This function is for low-level usage, use `easy_synthesize` for high-level usage.
-    #[allow(clippy::too_many_arguments)]
-    pub fn synthesize<I: Into<TTSIdent>>(
-        &self,
-        ident: I,
-        bert_ori: Array2<f32>,
-        phones: Array1<i64>,
-        tones: Array1<i64>,
-        lang_ids: Array1<i64>,
-        style_vector: Array1<f32>,
-        sdp_ratio: f32,
-        length_scale: f32,
-    ) -> Result<Vec<u8>> {
-        let audio_array = model::synthesize(
-            &self.find_model(ident)?.vits2,
-            bert_ori.to_owned(),
-            phones,
-            tones,
-            lang_ids,
-            style_vector,
-            sdp_ratio,
-            length_scale,
-        )?;
-        tts_util::array_to_vec(audio_array)
-    }
 }
 
 /// Synthesize options
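Taken together, the core changes give TTSModelHolder an optional cap on resident models (entries over the cap keep only their raw bytes and are reloaded on demand by find_and_load_model), and easy_synthesize gains a speaker_id argument. A minimal usage sketch, assuming the sbv2_core::tts module path and placeholder file names:

```rust
use sbv2_core::tts::{SynthesizeOptions, TTSModelHolder};

fn main() -> anyhow::Result<()> {
    // Keep at most two models loaded at once; the rest are reloaded on demand.
    let mut holder = TTSModelHolder::new(
        std::fs::read("models/deberta.onnx")?,
        std::fs::read("models/tokenizer.json")?,
        Some(2),
    )?;
    holder.load_sbv2file("tsukuyomi", std::fs::read("models/tsukuyomi.sbv2")?)?;

    // New signature: ident, text, style_id, speaker_id, options.
    let audio =
        holder.easy_synthesize("tsukuyomi", "こんにちは", 0, 0, SynthesizeOptions::default())?;
    std::fs::write("output.wav", audio)?;
    Ok(())
}
```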
sbv2_wasm/pnpm-lock.yaml (generated, 26 lines changed)
@@ -10,14 +10,14 @@ importers:
     dependencies:
       onnxruntime-web:
         specifier: ^1.19.2
-        version: 1.19.2
+        version: 1.20.0
     devDependencies:
       '@biomejs/biome':
         specifier: ^1.9.2
         version: 1.9.4
       '@types/node':
         specifier: ^22.7.4
-        version: 22.7.7
+        version: 22.8.0
       esbuild:
         specifier: ^0.24.0
         version: 0.24.0
@@ -254,8 +254,8 @@ packages:
   '@protobufjs/utf8@1.1.0':
     resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==}
 
-  '@types/node@22.7.7':
-    resolution: {integrity: sha512-SRxCrrg9CL/y54aiMCG3edPKdprgMVGDXjA3gB8UmmBW5TcXzRUYAh8EWzTnSJFAd1rgImPELza+A3bJ+qxz8Q==}
+  '@types/node@22.8.0':
+    resolution: {integrity: sha512-84rafSBHC/z1i1E3p0cJwKA+CfYDNSXX9WSZBRopjIzLET8oNt6ht2tei4C7izwDeEiLLfdeSVBv1egOH916hg==}
 
   esbuild@0.24.0:
     resolution: {integrity: sha512-FuLPevChGDshgSicjisSooU0cemp/sGXR841D5LHMB7mTVOmsEHcAxaH3irL53+8YDIeVNQEySh4DaYU/iuPqQ==}
@@ -271,11 +271,11 @@ packages:
   long@5.2.3:
     resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==}
 
-  onnxruntime-common@1.19.2:
-    resolution: {integrity: sha512-a4R7wYEVFbZBlp0BfhpbFWqe4opCor3KM+5Wm22Az3NGDcQMiU2hfG/0MfnBs+1ZrlSGmlgWeMcXQkDk1UFb8Q==}
+  onnxruntime-common@1.20.0:
+    resolution: {integrity: sha512-9ehS4ul5fBszIcHhfxuDgk45lO+Fqrxmrgwk1Pxb1JRvbQiCB/v9Royv95SRCWHktLMviqNjBsEd/biJhd39cg==}
 
-  onnxruntime-web@1.19.2:
-    resolution: {integrity: sha512-r0ok6KpTUXR4WA+rHvUiZn7JoH02e8iS7XE1p5bXk7q3E0UaRFfYvpMNUHqEPiTBMuIssfBxDCQjUihV8dDFPg==}
+  onnxruntime-web@1.20.0:
+    resolution: {integrity: sha512-IoUf8dqHFJLV4DUSz+Ok+xxyN6cQk57gb20m6PZE5gag3QXuvegYMq9dG8t/QF4JjTKIwvfvnr16ouzCCB9IMA==}
 
   platform@1.3.6:
     resolution: {integrity: sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==}
@@ -424,7 +424,7 @@ snapshots:
 
   '@protobufjs/utf8@1.1.0': {}
 
-  '@types/node@22.7.7':
+  '@types/node@22.8.0':
     dependencies:
       undici-types: 6.19.8
 
@@ -461,14 +461,14 @@ snapshots:
 
   long@5.2.3: {}
 
-  onnxruntime-common@1.19.2: {}
+  onnxruntime-common@1.20.0: {}
 
-  onnxruntime-web@1.19.2:
+  onnxruntime-web@1.20.0:
     dependencies:
       flatbuffers: 1.12.0
       guid-typescript: 1.0.9
       long: 5.2.3
-      onnxruntime-common: 1.19.2
+      onnxruntime-common: 1.20.0
       platform: 1.3.6
       protobufjs: 7.4.0
 
@@ -486,7 +486,7 @@ snapshots:
       '@protobufjs/path': 1.1.2
       '@protobufjs/pool': 1.1.0
       '@protobufjs/utf8': 1.1.0
-      '@types/node': 22.7.7
+      '@types/node': 22.8.0
       long: 5.2.3
 
   typescript@5.6.3: {}