Mirror of https://github.com/neodyland/sbv2-api.git, synced 2025-12-23 15:59:57 +00:00

Compare commits: commit-adb ... commit-1f9 (78 commits)
Commits in this range, by SHA1:

02ac0885e0, 1f96b09f3b, d583c1ca1c, c135aac852, f31fa1d4f9, efec7cce14, 61914129dc, 97c63a2e23, 3475f47305, 5493b91a84,
bca6d04e7b, d44ebe873e, 96b53d42cd, 9765ef51d2, e68f58d698, 2124fe4650, 0217c0a4d5, 1de09597f5, 38d86c9249, ddc132b27b,
558cd24677, 6657b06786, 2a8c9bafde, d7065ac6eb, 0b1dbe4991, 1ad588bfcf, 9733ba95fa, 843c16995c, f0821ea957, abc9cec7c7,
19e6b7f0e6, 451f4497b6, e5e92f6211, b835577325, 3caf93441a, 4deefc596b, 9174aa9b11, 6bccf0468b, bbb3f0003b, 46de7a9d3f,
252b27de48, 1dd3e02562, 4990261ecd, e873892223, f081b2ed22, 103eb51ca8, 01541ff381, 70c2341afd, a5d783bd65, 633dfc305e,
53d7daf11a, 5abfe732e4, 48aef6cef4, 64fc74eee6, 6e01103c5d, 00e95cd77c, 01f2aaa406, 3785faf81e, 70e16f95ad, a67df43fc7,
472d1c600f, acf94a1283, dd5c536f39, 07637f587d, e8dbf956e1, 2687af1a9b, e915e2bc84, 22ed557395, b8f0477318, f4de3e15ae,
fc944b9d33, 4255e15748, 8bf3906105, 1d80eda325, 99a4b130af, d430a6cb51, 61aae68d2d, abb40d4d2d
11  .github/dependabot.yml  (vendored, new file)

@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+  - package-ecosystem: "cargo" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"

@@ -1,4 +1,4 @@
-name: CI
+name: Build
 
 on:
   push:
@@ -6,7 +6,6 @@ on:
       - main
     tags:
       - '*'
-  pull_request:
   workflow_dispatch:
 
 permissions:
@@ -29,7 +28,7 @@ jobs:
       - uses: actions/setup-python@v5
         with:
          python-version: 3.x
-      - run: docker build . -f .github/workflows/CI.Dockerfile --tag ci
+      - run: docker build . -f .github/workflows/build.Dockerfile --tag ci
       - name: Build wheels
        uses: PyO3/maturin-action@v1
        with:
@@ -111,6 +110,7 @@ jobs:
        with:
          name: wheels-sdist
          path: ./crates/sbv2_bindings/dist
+
  python-wheel:
    name: Wheel Upload
    runs-on: ubuntu-latest

26  .github/workflows/lint.yml  (vendored, new file)

@@ -0,0 +1,26 @@
+name: Lint
+
+on:
+  pull_request:
+
+jobs:
+  check:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        components:
+          - rustfmt
+          - clippy
+    steps:
+      - name: Setup
+        uses: actions/checkout@v4
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: ${{ matrix.components }}
+      - name: Format
+        if: ${{ matrix.components == 'rustfmt' }}
+        run: cargo fmt --all -- --check
+      - name: Lint
+        if: ${{ matrix.components == 'clippy' }}
+        run: cargo clippy --all-targets --all-features -- -D warnings

826  Cargo.lock  (generated; file diff suppressed because it is too large)

@@ -1,5 +1,5 @@
 [workspace]
-resolver = "2"
+resolver = "3"
 members = ["./crates/sbv2_api", "./crates/sbv2_core", "./crates/sbv2_bindings", "./crates/sbv2_wasm"]
 
 [workspace.package]
@@ -8,7 +8,7 @@ edition = "2021"
 description = "Style-Bert-VITSの推論ライブラリ"
 license = "MIT"
 readme = "./README.md"
-repository = "https://github.com/tuna2134/sbv2-api"
+repository = "https://github.com/neodyland/sbv2-api"
 documentation = "https://docs.rs/sbv2_core"
 
 [workspace.dependencies]

1  LICENSE

@@ -1,6 +1,7 @@
 MIT License
 
 Copyright (c) 2024 tuna2134
+Copyright (c) 2025- neodyland
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

27  README.md

@@ -1,14 +1,23 @@
 # SBV2-API
 
-## 注意:本バージョンはアルファ版です。
-安定版を利用したい場合は[こちら](https://github.com/tuna2134/sbv2-api/tree/v0.1.x)をご覧ください。
+> [!CAUTION]
+> 本バージョンはアルファ版です。
+>
+> 安定版を利用したい場合は[こちら](https://github.com/neodyland/sbv2-api/tree/v0.1.x)をご覧ください。
 
-## 注意: オプションの辞書はLGPLです。
-オプションの辞書を使用する場合、バイナリの内部の辞書部分について、LGPLが適用されます。
+> [!CAUTION]
+> オプションの辞書はLGPLです。
+>
+> オプションの辞書を使用する場合、バイナリの内部の辞書部分について、LGPLが適用されます。
 
+> [!NOTE]
+> このレポジトリはメンテナンスの都合上、[tuna2134](https:://github.com/tuna2134)氏の所属する[Neodyland](https://neody.land/)へとリポジトリ所在地を移動しました。
+>
+> 引き続きtuna2134氏がメインメンテナとして管理しています。
+
 ## プログラミングに詳しくない方向け
 
-[こちら](https://github.com/tuna2134/sbv2-gui?tab=readme-ov-file)を参照してください。
+[こちら](https://github.com/tuna2134/sbv2-gui)を参照してください。
 
 コマンドやpythonの知識なしで簡単に使えるバージョンです。(できることはほぼ同じ)
 
@@ -20,7 +29,7 @@ JP-Extra しか対応していません。(基本的に対応する予定もあ
 
 ## 変換方法
 
-[こちら](https://github.com/tuna2134/sbv2-api/tree/main/scripts/convert)を参照してください。
+[こちら](https://github.com/neodyland/sbv2-api/tree/main/scripts/convert)を参照してください。
 
 ## Todo
 
@@ -66,7 +75,7 @@ CPUの場合は
 ```sh
 docker run -it --rm -p 3000:3000 --name sbv2 \
 -v ./models:/work/models --env-file .env \
-ghcr.io/tuna2134/sbv2-api:cpu
+ghcr.io/neodyland/sbv2-api:cpu
 ```
 
 <details>
@@ -81,7 +90,7 @@ CPUの場合は
 ```bash
 docker run --platform linux/amd64 -it --rm -p 3000:3000 --name sbv2 \
 -v ./models:/work/models --env-file .env \
-ghcr.io/tuna2134/sbv2-api:cpu
+ghcr.io/neodyland/sbv2-api:cpu
 ```
 </details>
 
@@ -90,7 +99,7 @@ CUDAの場合は
 docker run -it --rm -p 3000:3000 --name sbv2 \
 -v ./models:/work/models --env-file .env \
 --gpus all \
-ghcr.io/tuna2134/sbv2-api:cuda
+ghcr.io/neodyland/sbv2-api:cuda
 ```
 
 ### 起動確認

@@ -16,8 +16,8 @@ env_logger.workspace = true
 log = "0.4.22"
 sbv2_core = { version = "0.2.0-alpha6", path = "../sbv2_core", features = ["aivmx"] }
 serde = { version = "1.0.210", features = ["derive"] }
-tokio = { version = "1.40.0", features = ["full"] }
-utoipa = { version = "5.0.0", features = ["axum_extras"] }
+tokio = { version = "1.45.1", features = ["full"] }
+utoipa = { version = "5.4.0", features = ["axum_extras"] }
 utoipa-scalar = { version = "0.3.0", features = ["axum"] }
 
 [features]

@@ -53,12 +53,16 @@ struct SynthesizeRequest {
     text: String,
     ident: String,
     #[serde(default = "sdp_default")]
+    #[schema(example = 0.0_f32)]
     sdp_ratio: f32,
     #[serde(default = "length_default")]
+    #[schema(example = 1.0_f32)]
     length_scale: f32,
     #[serde(default = "style_id_default")]
+    #[schema(example = 0_i32)]
     style_id: i32,
     #[serde(default = "speaker_id_default")]
+    #[schema(example = 0_i64)]
     speaker_id: i64,
 }
 

@@ -16,7 +16,7 @@ crate-type = ["cdylib"]
 [dependencies]
 anyhow.workspace = true
 ndarray.workspace = true
-pyo3 = { version = "0.23.0", features = ["anyhow"] }
+pyo3 = { version = "0.25.1", features = ["anyhow"] }
 sbv2_core = { path = "../sbv2_core", features = ["std"], default-features = false }
 
 [features]

@@ -136,6 +136,7 @@ impl TTSModel {
     /// -------
     /// voice_data : bytes
     ///     音声データ
+    #[allow(clippy::too_many_arguments)]
     fn synthesize<'p>(
         &'p mut self,
         py: Python<'p>,

@@ -16,7 +16,7 @@ env_logger.workspace = true
 hound = "3.5.1"
 jpreprocess = { version = "0.12.0", features = ["naist-jdic"] }
 ndarray.workspace = true
-npyz = { version = "0.8.3", optional = true }
+npyz = { version = "0.8.4", optional = true }
 num_cpus = "1.16.0"
 once_cell.workspace = true
 ort = { git = "https://github.com/pykeio/ort.git", version = "2.0.0-rc.9", optional = true }
@@ -44,4 +44,4 @@ base64 = ["dep:base64"]
 
 [build-dependencies]
 dirs = "6.0.0"
-ureq = "3.0.6"
+ureq = "3.0.12"

@@ -5,21 +5,27 @@ use std::io::copy;
 use std::path::PathBuf;
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let static_path = home_dir().unwrap().join(".cache/sbv2/all.bin");
+    let static_dir = home_dir().unwrap().join(".cache/sbv2");
+    let static_path = static_dir.join("all.bin");
     let out_path = PathBuf::from(&env::var("OUT_DIR").unwrap()).join("all.bin");
     println!("cargo:rerun-if-changed=build.rs");
     if static_path.exists() {
-        if fs::hard_link(&static_path, &out_path).is_err() {
-            fs::copy(static_path, out_path).unwrap();
-        };
+        println!("cargo:info=Dictionary file already exists, skipping download.");
     } else {
         println!("cargo:warning=Downloading dictionary file...");
         let mut response =
             ureq::get("https://huggingface.co/neody/sbv2-api-assets/resolve/main/dic/all.bin")
                 .call()?;
         let mut response = response.body_mut().as_reader();
-        let mut file = fs::File::create(&out_path)?;
+        if !static_dir.exists() {
+            fs::create_dir_all(static_dir)?;
+        }
+        let mut file = fs::File::create(&static_path)?;
         copy(&mut response, &mut file)?;
     }
+    if !out_path.exists() && fs::hard_link(&static_path, &out_path).is_err() {
+        println!("cargo:warning=Failed to create hard link, copying instead.");
+        fs::copy(static_path, out_path)?;
+    }
     Ok(())
 }

@@ -14,11 +14,9 @@ pub fn predict(
             "attention_mask" => TensorRef::from_array_view((vec![1, attention_masks.len() as i64], attention_masks.as_slice()))?,
         }
     )?;
-
     let output = outputs["output"]
-        .try_extract_tensor::<f32>()?
+        .try_extract_array::<f32>()?
         .into_dimensionality::<Ix2>()?
         .to_owned();
-
     Ok(output)
 }

@@ -28,6 +28,8 @@ pub enum Error {
     Base64Error(#[from] base64::DecodeError),
     #[error("other")]
     OtherError(String),
+    #[error("Style error: {0}")]
+    StyleError(String),
 }
 
 pub type Result<T> = std::result::Result<T, Error>;

@@ -1,5 +1,5 @@
 use crate::error::{Error, Result};
-use crate::mora::{MORA_KATA_TO_MORA_PHONEMES, VOWELS};
+use crate::mora::{CONSONANTS, MORA_KATA_TO_MORA_PHONEMES, MORA_PHONEMES_TO_MORA_KATA, VOWELS};
 use crate::norm::{replace_punctuation, PUNCTUATIONS};
 use jpreprocess::{kind, DefaultTokenizer, JPreprocess, SystemDictionaryConfig, UserDictionary};
 use once_cell::sync::Lazy;
@@ -76,6 +76,34 @@ static MORA_PATTERN: Lazy<Vec<String>> = Lazy::new(|| {
 });
 static LONG_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"(\w)(ー*)").unwrap());
 
+fn phone_tone_to_kana(phones: Vec<String>, tones: Vec<i32>) -> Vec<(String, i32)> {
+    let phones = &phones[1..];
+    let tones = &tones[1..];
+    let mut results = Vec::new();
+    let mut current_mora = String::new();
+    for ((phone, _next_phone), (&tone, &next_tone)) in phones
+        .iter()
+        .zip(phones.iter().skip(1))
+        .zip(tones.iter().zip(tones.iter().skip(1)))
+    {
+        if PUNCTUATIONS.contains(&phone.clone().as_str()) {
+            results.push((phone.to_string(), tone));
+            continue;
+        }
+        if CONSONANTS.contains(&phone.clone()) {
+            assert_eq!(current_mora, "");
+            assert_eq!(tone, next_tone);
+            current_mora = phone.to_string()
+        } else {
+            current_mora += phone;
+            let kana = MORA_PHONEMES_TO_MORA_KATA.get(&current_mora).unwrap();
+            results.push((kana.to_string(), tone));
+            current_mora = String::new();
+        }
+    }
+    results
+}
+
 pub struct JTalkProcess {
     jpreprocess: Arc<JPreprocessType>,
     parsed: Vec<String>,
@@ -165,6 +193,11 @@ impl JTalkProcess {
         Ok((phones, tones, new_word2ph))
     }
 
+    pub fn g2kana_tone(&self) -> Result<Vec<(String, i32)>> {
+        let (phones, tones, _) = self.g2p()?;
+        Ok(phone_tone_to_kana(phones, tones))
+    }
+
     fn distribute_phone(n_phone: i32, n_word: i32) -> Vec<i32> {
         let mut phones_per_word = vec![0; n_word as usize];
         for _ in 0..n_phone {

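The collapse rule behind the new phone_tone_to_kana can be restated in a small self-contained sketch. This is illustrative only, not repository code: the tiny lookup table and consonant list below are made-up stand-ins for the real MORA_PHONEMES_TO_MORA_KATA and CONSONANTS statics.

```rust
use std::collections::HashMap;

// Illustrative only: a consonant phone is buffered until its vowel arrives,
// the joined string is looked up as one mora, and that mora is emitted with
// the vowel position's tone; punctuation would pass straight through.
fn collapse(phone_tone: &[(&str, i32)]) -> Vec<(String, i32)> {
    let table = HashMap::from([("ko", "コ"), ("N", "ン"), ("chi", "チ")]);
    let consonants = ["k", "s", "t", "n", "h", "m", "y", "r", "w", "g", "z", "d", "b", "p", "ch"];
    let mut out = Vec::new();
    let mut pending = String::new();
    for &(phone, tone) in phone_tone {
        if consonants.contains(&phone) {
            pending = phone.to_string(); // wait for the matching vowel
        } else {
            pending.push_str(phone); // bare vowel, "N", or consonant+vowel
            if let Some(kana) = table.get(pending.as_str()) {
                out.push((kana.to_string(), tone));
            }
            pending.clear();
        }
    }
    out
}

fn main() {
    // "k"+"o", "N", "ch"+"i" collapse to コ, ン, チ with the vowel's tone
    let kana = collapse(&[("k", 0), ("o", 0), ("N", 1), ("ch", 1), ("i", 1)]);
    assert_eq!(
        kana,
        vec![("コ".to_string(), 0), ("ン".to_string(), 1), ("チ".to_string(), 1)]
    );
}
```
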
@@ -30,8 +30,7 @@ fn main_inner() -> anyhow::Result<()> {
         }
     }
 
-    let audio =
-        tts_holder.easy_synthesize(ident, &text, 0, 0, tts::SynthesizeOptions::default())?;
+    let audio = tts_holder.easy_synthesize(ident, text, 0, 0, tts::SynthesizeOptions::default())?;
     fs::write("output.wav", audio)?;
 
     Ok(())

@@ -21,10 +21,7 @@ pub fn load_model<P: AsRef<[u8]>>(model_file: P, bert: bool) -> Result<Session>
     #[cfg(feature = "cuda")]
     {
         #[allow(unused_mut)]
-        let mut cuda = ort::execution_providers::CUDAExecutionProvider::default()
-            .with_conv_algorithm_search(
-                ort::execution_providers::cuda::CUDAExecutionProviderCuDNNConvAlgoSearch::Default,
-            );
+        let mut cuda = ort::execution_providers::CUDAExecutionProvider::default();
         #[cfg(feature = "cuda_tf32")]
         {
             cuda = cuda.with_tf32(true);
@@ -101,11 +98,9 @@ pub fn synthesize(
         "noise_scale" => noise_scale,
         "noise_scale_w" => noise_scale_w,
     })?;
-
     let audio_array = outputs["output"]
-        .try_extract_tensor::<f32>()?
+        .try_extract_array::<f32>()?
         .into_dimensionality::<Ix3>()?
         .to_owned();
-
     Ok(audio_array)
 }

@@ -25,6 +25,21 @@ static MORA_LIST_ADDITIONAL: Lazy<Vec<Mora>> = Lazy::new(|| {
     data.additional
 });
 
+pub static MORA_PHONEMES_TO_MORA_KATA: Lazy<HashMap<String, String>> = Lazy::new(|| {
+    let mut map = HashMap::new();
+    for mora in MORA_LIST_MINIMUM.iter() {
+        map.insert(
+            format!(
+                "{}{}",
+                mora.consonant.clone().unwrap_or("".to_string()),
+                mora.vowel
+            ),
+            mora.mora.clone(),
+        );
+    }
+    map
+});
+
 pub static MORA_KATA_TO_MORA_PHONEMES: Lazy<HashMap<String, (Option<String>, String)>> =
     Lazy::new(|| {
         let mut map = HashMap::new();
@@ -37,4 +52,12 @@ pub static MORA_KATA_TO_MORA_PHONEMES: Lazy<HashMap<String, (Option<String>, Str
         map
     });
 
+pub static CONSONANTS: Lazy<Vec<String>> = Lazy::new(|| {
+    let consonants = MORA_KATA_TO_MORA_PHONEMES
+        .values()
+        .filter_map(|(consonant, _)| consonant.clone())
+        .collect::<Vec<_>>();
+    consonants
+});
+
 pub const VOWELS: [&str; 6] = ["a", "i", "u", "e", "o", "N"];

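For orientation, a short sketch of how the two mora maps compose: the existing forward map resolves a katakana mora to its (optional consonant, vowel), the new reverse map goes from the joined phoneme string back to the mora, and CONSONANTS is just every consonant the forward map knows about. It assumes the mora module and these statics are reachable from outside the crate, which the diff itself does not show; inside sbv2_core the paths are crate::mora::….

```rust
// Sketch, not repository code.
use sbv2_core::mora::{CONSONANTS, MORA_KATA_TO_MORA_PHONEMES, MORA_PHONEMES_TO_MORA_KATA};

fn main() {
    if let Some((consonant, vowel)) = MORA_KATA_TO_MORA_PHONEMES.get("カ") {
        // e.g. "カ" -> ("k", "a"); joining gives the reverse-map key "ka"
        let key = format!("{}{}", consonant.clone().unwrap_or_default(), vowel);
        println!("{key} -> {:?}", MORA_PHONEMES_TO_MORA_KATA.get(&key)); // Some("カ")
        if let Some(c) = consonant {
            println!("is consonant: {}", CONSONANTS.contains(c)); // true for "k"
        }
    }
}
```
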
@@ -1,4 +1,4 @@
-use crate::error::Result;
+use crate::error::{Error, Result};
 use ndarray::{s, Array1, Array2};
 use serde::Deserialize;
 
@@ -21,6 +21,18 @@ pub fn get_style_vector(
     style_id: i32,
     weight: f32,
 ) -> Result<Array1<f32>> {
+    if style_vectors.shape().len() != 2 {
+        return Err(Error::StyleError(
+            "Invalid shape for style vectors".to_string(),
+        ));
+    }
+    if style_id < 0 || style_id >= style_vectors.shape()[0] as i32 {
+        return Err(Error::StyleError(format!(
+            "Invalid style ID: {}. Max ID: {}",
+            style_id,
+            style_vectors.shape()[0] - 1
+        )));
+    }
     let mean = style_vectors.slice(s![0, ..]).to_owned();
     let style_vector = style_vectors.slice(s![style_id as usize, ..]).to_owned();
     let diff = (style_vector - &mean) * weight;

@@ -41,7 +41,7 @@ pub struct TTSModelHolder {
     tokenizer: Tokenizer,
     bert: Session,
     models: Vec<TTSModel>,
-    jtalk: jtalk::JTalk,
+    pub jtalk: jtalk::JTalk,
     max_loaded_models: Option<usize>,
 }
 
@@ -205,6 +205,24 @@ impl TTSModelHolder {
     ) -> Result<(Array2<f32>, Array1<i64>, Array1<i64>, Array1<i64>)> {
         crate::tts_util::parse_text_blocking(
             text,
+            None,
+            &self.jtalk,
+            &self.tokenizer,
+            |token_ids, attention_masks| {
+                crate::bert::predict(&mut self.bert, token_ids, attention_masks)
+            },
+        )
+    }
+
+    #[allow(clippy::type_complexity)]
+    pub fn parse_text_neo(
+        &mut self,
+        text: String,
+        given_tones: Option<Vec<i32>>,
+    ) -> Result<(Array2<f32>, Array1<i64>, Array1<i64>, Array1<i64>)> {
+        crate::tts_util::parse_text_blocking(
+            &text,
+            given_tones,
             &self.jtalk,
             &self.tokenizer,
             |token_ids, attention_masks| {
@@ -347,6 +365,79 @@ impl TTSModelHolder {
         };
         tts_util::array_to_vec(audio_array)
     }
+
+    pub fn easy_synthesize_neo<I: Into<TTSIdent> + Copy>(
+        &mut self,
+        ident: I,
+        text: &str,
+        given_tones: Option<Vec<i32>>,
+        style_id: i32,
+        speaker_id: i64,
+        options: SynthesizeOptions,
+    ) -> Result<Vec<u8>> {
+        self.find_and_load_model(ident)?;
+        let style_vector = self.get_style_vector(ident, style_id, options.style_weight)?;
+        let audio_array = if options.split_sentences {
+            let texts: Vec<&str> = text.split('\n').collect();
+            let mut audios = vec![];
+            for (i, t) in texts.iter().enumerate() {
+                if t.is_empty() {
+                    continue;
+                }
+                let (bert_ori, phones, tones, lang_ids) =
+                    self.parse_text_neo(t.to_string(), given_tones.clone())?;
+
+                let vits2 = self
+                    .find_model(ident)?
+                    .vits2
+                    .as_mut()
+                    .ok_or(Error::ModelNotFoundError(ident.into().to_string()))?;
+                let audio = model::synthesize(
+                    vits2,
+                    bert_ori.to_owned(),
+                    phones,
+                    Array1::from_vec(vec![speaker_id]),
+                    tones,
+                    lang_ids,
+                    style_vector.clone(),
+                    options.sdp_ratio,
+                    options.length_scale,
+                    0.677,
+                    0.8,
+                )?;
+                audios.push(audio.clone());
+                if i != texts.len() - 1 {
+                    audios.push(Array3::zeros((1, 1, 22050)));
+                }
+            }
+            concatenate(
+                Axis(2),
+                &audios.iter().map(|x| x.view()).collect::<Vec<_>>(),
+            )?
+        } else {
+            let (bert_ori, phones, tones, lang_ids) = self.parse_text(text)?;
+
+            let vits2 = self
+                .find_model(ident)?
+                .vits2
+                .as_mut()
+                .ok_or(Error::ModelNotFoundError(ident.into().to_string()))?;
+            model::synthesize(
+                vits2,
+                bert_ori.to_owned(),
+                phones,
+                Array1::from_vec(vec![speaker_id]),
+                tones,
+                lang_ids,
+                style_vector,
+                options.sdp_ratio,
+                options.length_scale,
+                0.677,
+                0.8,
+            )?
+        };
+        tts_util::array_to_vec(audio_array)
+    }
 }
 
 /// Synthesize options

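A minimal sketch of how the new entry point is meant to be called, mirroring the /synthesis handler added in crates/sbv2_editor later in this diff. The ident "tsukuyomi", the text, and the surrounding setup are placeholders; error conversion to anyhow follows what the editor crate does. The only difference from easy_synthesize is the Option<Vec<i32>> of tones, which overrides the tones produced by g2p when Some(..) is passed.

```rust
use sbv2_core::tts::{SynthesizeOptions, TTSModelHolder};

// `holder` is an already-initialised TTSModelHolder with the model loaded.
fn synth_with_tones(holder: &mut TTSModelHolder, tones: Vec<i32>) -> anyhow::Result<Vec<u8>> {
    let ident = "tsukuyomi".to_string(); // model ident (placeholder)
    let wav = holder.easy_synthesize_neo(
        &ident,
        "こんにちは、今日はいい天気ですね。",
        Some(tones), // e.g. produced by kata_tone2phone_tone (see tts_util below)
        0,           // style_id
        0,           // speaker_id
        SynthesizeOptions::default(),
    )?;
    Ok(wav) // WAV bytes, same as easy_synthesize
}
```
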
@@ -1,10 +1,22 @@
 use std::io::Cursor;
 
 use crate::error::Result;
+use crate::jtalk::JTalkProcess;
+use crate::mora::MORA_KATA_TO_MORA_PHONEMES;
+use crate::norm::PUNCTUATIONS;
 use crate::{jtalk, nlp, norm, tokenizer, utils};
 use hound::{SampleFormat, WavSpec, WavWriter};
 use ndarray::{concatenate, s, Array, Array1, Array2, Array3, Axis};
 use tokenizers::Tokenizer;
 
+pub fn preprocess_parse_text(text: &str, jtalk: &jtalk::JTalk) -> Result<(String, JTalkProcess)> {
+    let text = jtalk.num2word(text)?;
+    let normalized_text = norm::normalize_text(&text);
+
+    let process = jtalk.process_text(&normalized_text)?;
+    Ok((normalized_text, process))
+}
+
 /// Parse text and return the input for synthesize
 ///
 /// # Note
@@ -21,13 +33,9 @@ pub async fn parse_text(
         Box<dyn std::future::Future<Output = Result<ndarray::Array2<f32>>>>,
     >,
 ) -> Result<(Array2<f32>, Array1<i64>, Array1<i64>, Array1<i64>)> {
-    let text = jtalk.num2word(text)?;
-    let normalized_text = norm::normalize_text(&text);
-
-    let process = jtalk.process_text(&normalized_text)?;
+    let (normalized_text, process) = preprocess_parse_text(text, jtalk)?;
     let (phones, tones, mut word2ph) = process.g2p()?;
     let (phones, tones, lang_ids) = nlp::cleaned_text_to_sequence(phones, tones);
-
     let phones = utils::intersperse(&phones, 0);
     let tones = utils::intersperse(&tones, 0);
     let lang_ids = utils::intersperse(&lang_ids, 0);
@@ -92,6 +100,7 @@ pub async fn parse_text(
 #[allow(clippy::type_complexity)]
 pub fn parse_text_blocking(
     text: &str,
+    given_tones: Option<Vec<i32>>,
     jtalk: &jtalk::JTalk,
     tokenizer: &Tokenizer,
     bert_predict: impl FnOnce(Vec<i64>, Vec<i64>) -> Result<ndarray::Array2<f32>>,
@@ -100,7 +109,10 @@ pub fn parse_text_blocking(
     let normalized_text = norm::normalize_text(&text);
 
     let process = jtalk.process_text(&normalized_text)?;
-    let (phones, tones, mut word2ph) = process.g2p()?;
+    let (phones, mut tones, mut word2ph) = process.g2p()?;
+    if let Some(given_tones) = given_tones {
+        tones = given_tones;
+    }
     let (phones, tones, lang_ids) = nlp::cleaned_text_to_sequence(phones, tones);
 
     let phones = utils::intersperse(&phones, 0);
@@ -178,3 +190,23 @@ pub fn array_to_vec(audio_array: Array3<f32>) -> Result<Vec<u8>> {
     writer.finalize()?;
     Ok(cursor.into_inner())
 }
+
+pub fn kata_tone2phone_tone(kata_tone: Vec<(String, i32)>) -> Vec<(String, i32)> {
+    let mut results = vec![("_".to_string(), 0)];
+    for (mora, tone) in kata_tone {
+        if PUNCTUATIONS.contains(&mora.as_str()) {
+            results.push((mora, 0));
+            continue;
+        } else {
+            let (consonant, vowel) = MORA_KATA_TO_MORA_PHONEMES.get(&mora).unwrap();
+            if let Some(consonant) = consonant {
+                results.push((consonant.to_string(), tone));
+                results.push((vowel.to_string(), tone));
+            } else {
+                results.push((vowel.to_string(), tone));
+            }
+        }
+    }
+    results.push(("_".to_string(), 0));
+    results
+}

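Taken together with the jtalk and tts changes above, these helpers support a kana-editing round trip: text becomes editable (kana, tone) pairs, and those pairs expand back into the per-phone tone list that easy_synthesize_neo expects. The sketch below mirrors what the sbv2_editor handlers (shown later in this diff) do, and only uses APIs visible in this diff; `holder` is assumed to be an already-initialised TTSModelHolder.

```rust
use sbv2_core::tts::TTSModelHolder;
use sbv2_core::tts_util::{kata_tone2phone_tone, preprocess_parse_text};

// Returns the per-phone tone list to feed into easy_synthesize_neo's
// given_tones argument; `holder.jtalk` is public as of this diff.
fn tones_for(holder: &TTSModelHolder, text: &str) -> anyhow::Result<Vec<i32>> {
    // normalise the text and run jpreprocess, then collapse the g2p output
    // into editable (katakana mora, tone) pairs
    let (_normalized, process) = preprocess_parse_text(text, &holder.jtalk)?;
    let kana_tone = process.g2kana_tone()?;
    // an editor UI could adjust `kana_tone` here, then expand it back to
    // phone-level tones (with the leading/trailing "_" markers added)
    let phone_tone = kata_tone2phone_tone(kana_tone);
    Ok(phone_tone.into_iter().map(|(_, tone)| tone).collect())
}
```
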
19  crates/sbv2_editor/Cargo.toml  (new file)

@@ -0,0 +1,19 @@
+[package]
+name = "sbv2_editor"
+version.workspace = true
+edition.workspace = true
+description.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+documentation.workspace = true
+
+[dependencies]
+anyhow.workspace = true
+axum = "0.8.1"
+dotenvy.workspace = true
+env_logger.workspace = true
+log = "0.4.27"
+sbv2_core = { version = "0.2.0-alpha6", path = "../sbv2_core", features = ["aivmx"] }
+serde = { version = "1.0.219", features = ["derive"] }
+tokio = { version = "1.44.1", features = ["full"] }

2  crates/sbv2_editor/README.md  (new file)

@@ -0,0 +1,2 @@
+# sbv2-voicevox
+sbv2-apiをvoicevox化します。

226  crates/sbv2_editor/query2.json  (new file)

@@ -0,0 +1,226 @@
+{
+  "accent_phrases": [
+    {
+      "moras": [
+        {
+          "text": "コ",
+          "consonant": "k",
+          "consonant_length": 0.10002632439136505,
+          "vowel": "o",
+          "vowel_length": 0.15740256011486053,
+          "pitch": 5.749961853027344
+        },
+        {
+          "text": "ン",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "N",
+          "vowel_length": 0.08265873789787292,
+          "pitch": 5.89122200012207
+        },
+        {
+          "text": "ニ",
+          "consonant": "n",
+          "consonant_length": 0.03657080978155136,
+          "vowel": "i",
+          "vowel_length": 0.1175866425037384,
+          "pitch": 5.969866752624512
+        },
+        {
+          "text": "チ",
+          "consonant": "ch",
+          "consonant_length": 0.09005842357873917,
+          "vowel": "i",
+          "vowel_length": 0.08666137605905533,
+          "pitch": 5.958892822265625
+        },
+        {
+          "text": "ワ",
+          "consonant": "w",
+          "consonant_length": 0.07833231985569,
+          "vowel": "a",
+          "vowel_length": 0.21250136196613312,
+          "pitch": 5.949411392211914
+        }
+      ],
+      "accent": 5,
+      "pause_mora": {
+        "text": "、",
+        "consonant": null,
+        "consonant_length": null,
+        "vowel": "pau",
+        "vowel_length": 0.4723339378833771,
+        "pitch": 0.0
+      },
+      "is_interrogative": false
+    },
+    {
+      "moras": [
+        {
+          "text": "オ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "o",
+          "vowel_length": 0.22004225850105286,
+          "pitch": 5.6870927810668945
+        },
+        {
+          "text": "ン",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "N",
+          "vowel_length": 0.09161105751991272,
+          "pitch": 5.93472957611084
+        },
+        {
+          "text": "セ",
+          "consonant": "s",
+          "consonant_length": 0.08924821764230728,
+          "vowel": "e",
+          "vowel_length": 0.14142127335071564,
+          "pitch": 6.121850490570068
+        },
+        {
+          "text": "エ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "e",
+          "vowel_length": 0.10636933892965317,
+          "pitch": 6.157896041870117
+        },
+        {
+          "text": "ゴ",
+          "consonant": "g",
+          "consonant_length": 0.07600915431976318,
+          "vowel": "o",
+          "vowel_length": 0.09598273783922195,
+          "pitch": 6.188933849334717
+        },
+        {
+          "text": "オ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "o",
+          "vowel_length": 0.1079121008515358,
+          "pitch": 6.235202789306641
+        },
+        {
+          "text": "セ",
+          "consonant": "s",
+          "consonant_length": 0.09591838717460632,
+          "vowel": "e",
+          "vowel_length": 0.10286372154951096,
+          "pitch": 6.153214454650879
+        },
+        {
+          "text": "エ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "e",
+          "vowel_length": 0.08992656320333481,
+          "pitch": 6.02571439743042
+        },
+        {
+          "text": "ノ",
+          "consonant": "n",
+          "consonant_length": 0.05660202354192734,
+          "vowel": "o",
+          "vowel_length": 0.09676017612218857,
+          "pitch": 5.711844444274902
+        }
+      ],
+      "accent": 5,
+      "pause_mora": null,
+      "is_interrogative": false
+    },
+    {
+      "moras": [
+        {
+          "text": "セ",
+          "consonant": "s",
+          "consonant_length": 0.07805486768484116,
+          "vowel": "e",
+          "vowel_length": 0.09617523103952408,
+          "pitch": 5.774399280548096
+        },
+        {
+          "text": "カ",
+          "consonant": "k",
+          "consonant_length": 0.06712044775485992,
+          "vowel": "a",
+          "vowel_length": 0.148829385638237,
+          "pitch": 6.063965797424316
+        },
+        {
+          "text": "イ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "i",
+          "vowel_length": 0.11061104387044907,
+          "pitch": 6.040698051452637
+        },
+        {
+          "text": "エ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "e",
+          "vowel_length": 0.13046696782112122,
+          "pitch": 5.806027889251709
+        }
+      ],
+      "accent": 1,
+      "pause_mora": null,
+      "is_interrogative": false
+    },
+    {
+      "moras": [
+        {
+          "text": "ヨ",
+          "consonant": "y",
+          "consonant_length": 0.07194744795560837,
+          "vowel": "o",
+          "vowel_length": 0.08622600883245468,
+          "pitch": 5.694094657897949
+        },
+        {
+          "text": "オ",
+          "consonant": null,
+          "consonant_length": null,
+          "vowel": "o",
+          "vowel_length": 0.10635452717542648,
+          "pitch": 5.787222385406494
+        },
+        {
+          "text": "コ",
+          "consonant": "k",
+          "consonant_length": 0.07077334076166153,
+          "vowel": "o",
+          "vowel_length": 0.09248624742031097,
+          "pitch": 5.793357849121094
+        },
+        {
+          "text": "ソ",
+          "consonant": "s",
+          "consonant_length": 0.08705667406320572,
+          "vowel": "o",
+          "vowel_length": 0.2238258570432663,
+          "pitch": 5.643765449523926
+        }
+      ],
+      "accent": 1,
+      "pause_mora": null,
+      "is_interrogative": false
+    }
+  ],
+  "speedScale": 1.0,
+  "pitchScale": 0.0,
+  "intonationScale": 1.0,
+  "volumeScale": 1.0,
+  "prePhonemeLength": 0.1,
+  "postPhonemeLength": 0.1,
+  "pauseLength": null,
+  "pauseLengthScale": 1.0,
+  "outputSamplingRate": 24000,
+  "outputStereo": false,
+  "kana": "コンニチワ'、オンセエゴ'オセエノ/セ'カイエ/ヨ'オコソ"
+}

27  crates/sbv2_editor/src/error.rs  (new file)

@@ -0,0 +1,27 @@
+use axum::{
+    http::StatusCode,
+    response::{IntoResponse, Response},
+};
+
+pub type AppResult<T> = std::result::Result<T, AppError>;
+
+pub struct AppError(anyhow::Error);
+
+impl IntoResponse for AppError {
+    fn into_response(self) -> Response {
+        (
+            StatusCode::INTERNAL_SERVER_ERROR,
+            format!("Something went wrong: {}", self.0),
+        )
+            .into_response()
+    }
+}
+
+impl<E> From<E> for AppError
+where
+    E: Into<anyhow::Error>,
+{
+    fn from(err: E) -> Self {
+        Self(err.into())
+    }
+}

197  crates/sbv2_editor/src/main.rs  (new file)

@@ -0,0 +1,197 @@
+use axum::extract::State;
+use axum::{
+    extract::Query,
+    http::header::CONTENT_TYPE,
+    response::IntoResponse,
+    routing::{get, post},
+    Json, Router,
+};
+use sbv2_core::tts_util::kata_tone2phone_tone;
+use sbv2_core::{
+    tts::{SynthesizeOptions, TTSModelHolder},
+    tts_util::preprocess_parse_text,
+};
+use serde::{Deserialize, Serialize};
+use tokio::{fs, net::TcpListener, sync::Mutex};
+
+use std::env;
+use std::sync::Arc;
+
+use error::AppResult;
+
+mod error;
+
+#[derive(Deserialize)]
+struct RequestCreateAudioQuery {
+    text: String,
+}
+
+#[derive(Serialize, Deserialize)]
+struct AudioQuery {
+    kana: String,
+    tone: i32,
+}
+
+#[derive(Serialize)]
+struct ResponseCreateAudioQuery {
+    audio_query: Vec<AudioQuery>,
+    text: String,
+}
+
+async fn create_audio_query(
+    State(state): State<AppState>,
+    Query(request): Query<RequestCreateAudioQuery>,
+) -> AppResult<impl IntoResponse> {
+    let (text, process) = {
+        let tts_model = state.tts_model.lock().await;
+        preprocess_parse_text(&request.text, &tts_model.jtalk)?
+    };
+    let kana_tone_list = process.g2kana_tone()?;
+    let audio_query = kana_tone_list
+        .iter()
+        .map(|(kana, tone)| AudioQuery {
+            kana: kana.clone(),
+            tone: *tone,
+        })
+        .collect::<Vec<_>>();
+    Ok(Json(ResponseCreateAudioQuery { audio_query, text }))
+}
+
+#[derive(Deserialize)]
+pub struct RequestSynthesis {
+    text: String,
+    speaker_id: i64,
+    sdp_ratio: f32,
+    length_scale: f32,
+    style_id: i32,
+    audio_query: Vec<AudioQuery>,
+    ident: String,
+}
+
+async fn synthesis(
+    State(state): State<AppState>,
+    Json(request): Json<RequestSynthesis>,
+) -> AppResult<impl IntoResponse> {
+    let phone_tone = request
+        .audio_query
+        .iter()
+        .map(|query| (query.kana.clone(), query.tone))
+        .collect::<Vec<_>>();
+    let phone_tone = kata_tone2phone_tone(phone_tone);
+    let tones = phone_tone.iter().map(|(_, tone)| *tone).collect::<Vec<_>>();
+    let buffer = {
+        let mut tts_model = state.tts_model.lock().await;
+        tts_model.easy_synthesize_neo(
+            &request.ident,
+            &request.text,
+            Some(tones),
+            request.style_id,
+            request.speaker_id,
+            SynthesizeOptions {
+                sdp_ratio: request.sdp_ratio,
+                length_scale: request.length_scale,
+                ..Default::default()
+            },
+        )?
+    };
+    Ok(([(CONTENT_TYPE, "audio/wav")], buffer))
+}
+
+#[derive(Clone)]
+struct AppState {
+    tts_model: Arc<Mutex<TTSModelHolder>>,
+}
+
+impl AppState {
+    pub async fn new() -> anyhow::Result<Self> {
+        let mut tts_model = TTSModelHolder::new(
+            &fs::read(env::var("BERT_MODEL_PATH")?).await?,
+            &fs::read(env::var("TOKENIZER_PATH")?).await?,
+            env::var("HOLDER_MAX_LOADED_MODElS")
+                .ok()
+                .and_then(|x| x.parse().ok()),
+        )?;
+        let models = env::var("MODELS_PATH").unwrap_or("models".to_string());
+        let mut f = fs::read_dir(&models).await?;
+        let mut entries = vec![];
+        while let Ok(Some(e)) = f.next_entry().await {
+            let name = e.file_name().to_string_lossy().to_string();
+            if name.ends_with(".onnx") && name.starts_with("model_") {
+                let name_len = name.len();
+                let name = name.chars();
+                entries.push(
+                    name.collect::<Vec<_>>()[6..name_len - 5]
+                        .iter()
+                        .collect::<String>(),
+                );
+            } else if name.ends_with(".sbv2") {
+                let entry = &name[..name.len() - 5];
+                log::info!("Try loading: {entry}");
+                let sbv2_bytes = match fs::read(format!("{models}/{entry}.sbv2")).await {
+                    Ok(b) => b,
+                    Err(e) => {
+                        log::warn!("Error loading sbv2_bytes from file {entry}: {e}");
+                        continue;
+                    }
+                };
+                if let Err(e) = tts_model.load_sbv2file(entry, sbv2_bytes) {
+                    log::warn!("Error loading {entry}: {e}");
+                };
+                log::info!("Loaded: {entry}");
+            } else if name.ends_with(".aivmx") {
+                let entry = &name[..name.len() - 6];
+                log::info!("Try loading: {entry}");
+                let aivmx_bytes = match fs::read(format!("{models}/{entry}.aivmx")).await {
+                    Ok(b) => b,
+                    Err(e) => {
+                        log::warn!("Error loading aivmx bytes from file {entry}: {e}");
+                        continue;
+                    }
+                };
+                if let Err(e) = tts_model.load_aivmx(entry, aivmx_bytes) {
+                    log::error!("Error loading {entry}: {e}");
+                }
+                log::info!("Loaded: {entry}");
+            }
+        }
+        for entry in entries {
+            log::info!("Try loading: {entry}");
+            let style_vectors_bytes =
+                match fs::read(format!("{models}/style_vectors_{entry}.json")).await {
+                    Ok(b) => b,
+                    Err(e) => {
+                        log::warn!("Error loading style_vectors_bytes from file {entry}: {e}");
+                        continue;
+                    }
+                };
+            let vits2_bytes = match fs::read(format!("{models}/model_{entry}.onnx")).await {
+                Ok(b) => b,
+                Err(e) => {
+                    log::warn!("Error loading vits2_bytes from file {entry}: {e}");
+                    continue;
+                }
+            };
+            if let Err(e) = tts_model.load(&entry, style_vectors_bytes, vits2_bytes) {
+                log::warn!("Error loading {entry}: {e}");
+            };
+            log::info!("Loaded: {entry}");
+        }
+        Ok(Self {
+            tts_model: Arc::new(Mutex::new(tts_model)),
+        })
+    }
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    dotenvy::dotenv_override().ok();
+    env_logger::init();
+    let app = Router::new()
+        .route("/", get(|| async { "Hello, world!" }))
+        .route("/audio_query", get(create_audio_query))
+        .route("/synthesis", post(synthesis))
+        .with_state(AppState::new().await?);
+    let listener = TcpListener::bind("0.0.0.0:8080").await?;
+    axum::serve(listener, app).await?;
+    Ok(())
+}

@@ -1,2 +1,2 @@
 # StyleBertVITS2 wasm
-refer to https://github.com/tuna2134/sbv2-api
+refer to https://github.com/neodyland/sbv2-api

@@ -11,6 +11,7 @@
   },
   "keywords": [],
   "author": "tuna2134",
+  "contributes": ["neodyland"],
   "license": "MIT",
   "devDependencies": {
     "@biomejs/biome": "^1.9.4",

8  crates/sbv2_wasm/pnpm-lock.yaml  (generated)

@@ -23,7 +23,7 @@ importers:
         version: 0.25.0
       typescript:
         specifier: ^5.7.3
-        version: 5.7.3
+        version: 5.8.3
 
 packages:
 
@@ -290,8 +290,8 @@ packages:
     resolution: {integrity: sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==}
     engines: {node: '>=12.0.0'}
 
-  typescript@5.7.3:
-    resolution: {integrity: sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==}
+  typescript@5.8.3:
+    resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==}
     engines: {node: '>=14.17'}
     hasBin: true
 
@@ -499,6 +499,6 @@ snapshots:
       '@types/node': 22.13.5
       long: 5.3.1
 
-  typescript@5.7.3: {}
+  typescript@5.8.3: {}
 
   undici-types@6.20.0: {}

1  scripts/convert/.python-version  (new file)

@@ -0,0 +1 @@
+3.11

@@ -1,5 +1,6 @@
-style-bert-vits2
+git+https://github.com/neodyland/style-bert-vits2-ref
 onnxsim
 numpy<2
 zstandard
 onnxruntime
+cmake<4

@@ -2,8 +2,16 @@ FROM rust AS builder
 WORKDIR /work
 COPY . .
 RUN cargo build -r --bin sbv2_api
-FROM gcr.io/distroless/cc-debian12
+FROM ubuntu AS upx
 WORKDIR /work
+RUN apt update && apt-get install -y upx binutils
 COPY --from=builder /work/target/release/sbv2_api /work/main
 COPY --from=builder /work/target/release/*.so /work
+RUN upx --best --lzma /work/main
+RUN find /work -maxdepth 1 -name "*.so" -exec strip --strip-unneeded {} +
+RUN find /work -maxdepth 1 -name "*.so" -exec upx --best --lzma {} +
+FROM gcr.io/distroless/cc-debian12
+WORKDIR /work
+COPY --from=upx /work/main /work/main
+COPY --from=upx /work/*.so /work
 CMD ["/work/main"]

@@ -2,9 +2,16 @@ FROM rust AS builder
 WORKDIR /work
 COPY . .
 RUN cargo build -r --bin sbv2_api -F cuda,cuda_tf32
-FROM nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04
+FROM ubuntu AS upx
 WORKDIR /work
+RUN apt update && apt-get install -y upx binutils
 COPY --from=builder /work/target/release/sbv2_api /work/main
 COPY --from=builder /work/target/release/*.so /work
+RUN upx --best --lzma /work/main
+RUN find /work -maxdepth 1 -name "*.so" -exec strip --strip-unneeded {} +
+FROM nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04
+WORKDIR /work
+COPY --from=upx /work/main /work/main
+COPY --from=upx /work/*.so /work
 ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/work
 CMD ["/work/main"]

@@ -1,3 +1,3 @@
 docker run -it --rm -p 3000:3000 --name sbv2 \
 -v ./models:/work/models --env-file .env \
-ghcr.io/tuna2134/sbv2-api:cpu
+ghcr.io/neodyland/sbv2-api:cpu

@@ -1,4 +1,4 @@
 docker run -it --rm -p 3000:3000 --name sbv2 \
 -v ./models:/work/models --env-file .env \
 --gpus all \
-ghcr.io/tuna2134/sbv2-api:cuda
+ghcr.io/neodyland/sbv2-api:cuda

@@ -12,7 +12,7 @@ def main():
     style_vector = model.get_style_vector("amitaro", 0, 1.0)
     with open("output.wav", "wb") as f:
         f.write(
-            model.synthesize("おはようございます。", "amitaro", style_vector, 0.0, 0.5)
+            model.synthesize("おはようございます。", "amitaro", 0, 0, 0.0, 0.5)
         )
 
 

19  test.py  (new file)

@@ -0,0 +1,19 @@
+import requests
+
+
+data = (requests.get("http://localhost:8080/audio_query", params={
+    "text": "こんにちは、今日はいい天気ですね。",
+})).json()
+print(data)
+
+data = (requests.post("http://localhost:8080/synthesis", json={
+    "text": data["text"],
+    "ident": "tsukuyomi",
+    "speaker_id": 0,
+    "style_id": 0,
+    "sdp_ratio": 0.5,
+    "length_scale": 0.5,
+    "audio_query": data["audio_query"],
+})).content
+with open("test.wav", "wb") as f:
+    f.write(data)