Commit by Googlefan, 2025-02-22 08:06:04 +00:00
12 changed files with 894 additions and 318 deletions

View File

@@ -1,19 +1,19 @@
[package]
name = "sbv2_api"
version = "0.2.0-alpha"
version = "0.2.0-alpha4"
edition = "2021"
[dependencies]
anyhow.workspace = true
axum = "0.7.5"
axum = "0.8.0"
dotenvy.workspace = true
env_logger.workspace = true
log = "0.4.22"
sbv2_core = { version = "0.2.0-alpha2", path = "../sbv2_core" }
sbv2_core = { version = "0.2.0-alpha2", path = "../sbv2_core", features = ["aivmx"] }
serde = { version = "1.0.210", features = ["derive"] }
tokio = { version = "1.40.0", features = ["full"] }
utoipa = { version = "5.0.0", features = ["axum_extras"] }
utoipa-scalar = { version = "0.2.0", features = ["axum"] }
utoipa-scalar = { version = "0.3.0", features = ["axum"] }
[features]
coreml = ["sbv2_core/coreml"]

View File

@@ -40,6 +40,14 @@ fn length_default() -> f32 {
1.0
}
fn style_id_default() -> i32 {
0
}
fn speaker_id_default() -> i64 {
0
}
#[derive(Deserialize, ToSchema)]
struct SynthesizeRequest {
text: String,
@@ -48,6 +56,10 @@ struct SynthesizeRequest {
sdp_ratio: f32,
#[serde(default = "length_default")]
length_scale: f32,
#[serde(default = "style_id_default")]
style_id: i32,
#[serde(default = "speaker_id_default")]
speaker_id: i64,
}
#[utoipa::path(
@@ -65,6 +77,8 @@ async fn synthesize(
ident,
sdp_ratio,
length_scale,
style_id,
speaker_id,
}): Json<SynthesizeRequest>,
) -> AppResult<impl IntoResponse> {
log::debug!("processing request: text={text}, ident={ident}, sdp_ratio={sdp_ratio}, length_scale={length_scale}");
@@ -73,7 +87,8 @@ async fn synthesize(
tts_model.easy_synthesize(
&ident,
&text,
0,
style_id,
speaker_id,
SynthesizeOptions {
sdp_ratio,
length_scale,
@@ -125,6 +140,20 @@ impl AppState {
log::warn!("Error loading {entry}: {e}");
};
log::info!("Loaded: {entry}");
} else if name.ends_with(".aivmx") {
let entry = &name[..name.len() - 6];
log::info!("Try loading: {entry}");
let aivmx_bytes = match fs::read(format!("{models}/{entry}.aivmx")).await {
Ok(b) => b,
Err(e) => {
log::warn!("Error loading aivmx bytes from file {entry}: {e}");
continue;
}
};
if let Err(e) = tts_model.load_aivmx(entry, aivmx_bytes) {
log::error!("Error loading {entry}: {e}");
}
log::info!("Loaded: {entry}");
}
}
for entry in entries {

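With these defaults in place, a /synthesize request can now select a style and speaker explicitly, and omitting either field falls back to 0. A minimal client-side sketch of the new request body, assuming serde_json (not part of this diff) and a placeholder model ident:

    use serde_json::json;

    fn main() {
        // Hypothetical payload for POST /synthesize; "some-model" is a placeholder ident.
        // style_id and speaker_id are the fields added in this commit; both default to 0 when omitted.
        let body = json!({
            "text": "こんにちは",
            "ident": "some-model",
            "sdp_ratio": 0.25,
            "length_scale": 1.0,
            "style_id": 0,
            "speaker_id": 1
        });
        println!("{body}");
    }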
View File

@@ -1,6 +1,6 @@
[package]
name = "sbv2_bindings"
version = "0.2.0-alpha2"
version = "0.2.0-alpha4"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,5 +11,5 @@ crate-type = ["cdylib"]
[dependencies]
anyhow.workspace = true
ndarray.workspace = true
pyo3 = { version = "0.22.0", features = ["anyhow"] }
pyo3 = { version = "0.23.0", features = ["anyhow"] }
sbv2_core = { version = "0.2.0-alpha2", path = "../sbv2_core" }

View File

@@ -142,6 +142,7 @@ impl TTSModel {
text: String,
ident: String,
style_id: i32,
speaker_id: i64,
sdp_ratio: f32,
length_scale: f32,
) -> anyhow::Result<Bound<'p, PyBytes>> {
@@ -149,13 +150,14 @@ impl TTSModel {
ident.as_str(),
&text,
style_id,
speaker_id,
SynthesizeOptions {
sdp_ratio,
length_scale,
..Default::default()
},
)?;
Ok(PyBytes::new_bound(py, &data))
Ok(PyBytes::new(py, &data))
}
fn unload(&mut self, ident: String) -> bool {

View File

@@ -1,7 +1,7 @@
[package]
name = "sbv2_core"
description = "Style-Bert-VITSの推論ライブラリ"
version = "0.2.0-alpha2"
version = "0.2.0-alpha4"
edition = "2021"
license = "MIT"
readme = "../README.md"
@@ -10,11 +10,13 @@ documentation = "https://docs.rs/sbv2_core"
[dependencies]
anyhow.workspace = true
base64 = { version = "0.22.1", optional = true }
dotenvy.workspace = true
env_logger.workspace = true
hound = "3.5.1"
jpreprocess = { version = "0.12.0", features = ["naist-jdic"] }
ndarray.workspace = true
npyz = { version = "0.8.3", optional = true }
num_cpus = "1.16.0"
once_cell.workspace = true
ort = { git = "https://github.com/pykeio/ort.git", version = "2.0.0-rc.9", optional = true }
@@ -36,3 +38,5 @@ tensorrt = ["ort/tensorrt", "std"]
coreml = ["ort/coreml", "std"]
default = ["std"]
no_std = ["tokenizers/unstable_wasm"]
aivmx = ["npyz", "base64"]
base64 = ["dep:base64"]

View File

@@ -23,6 +23,9 @@ pub enum Error {
HoundError(#[from] hound::Error),
#[error("model not found error")]
ModelNotFoundError(String),
#[cfg(feature = "base64")]
#[error("base64 error")]
Base64Error(#[from] base64::DecodeError),
#[error("other")]
OtherError(String),
}

View File

@@ -15,13 +15,22 @@ fn main_inner() -> anyhow::Result<()> {
.ok()
.and_then(|x| x.parse().ok()),
)?;
tts_holder.load_sbv2file(ident, fs::read(env::var("MODEL_PATH")?)?)?;
#[cfg(not(feature = "aivmx"))]
{
tts_holder.load_sbv2file(ident, fs::read(env::var("MODEL_PATH")?)?)?;
}
#[cfg(feature = "aivmx")]
{
tts_holder.load_aivmx(ident, fs::read(env::var("MODEL_PATH")?)?)?;
}
let audio = tts_holder.easy_synthesize(ident, &text, 0, tts::SynthesizeOptions::default())?;
let audio =
tts_holder.easy_synthesize(ident, &text, 0, 0, tts::SynthesizeOptions::default())?;
fs::write("output.wav", audio)?;
Ok(())
}
#[cfg(not(feature = "std"))]
fn main_inner() -> anyhow::Result<()> {
Ok(())

View File

@@ -1,7 +1,13 @@
use crate::error::{Error, Result};
use crate::{jtalk, model, style, tokenizer, tts_util};
#[cfg(feature = "aivmx")]
use base64::prelude::{Engine as _, BASE64_STANDARD};
#[cfg(feature = "aivmx")]
use ndarray::ShapeBuilder;
use ndarray::{concatenate, Array1, Array2, Array3, Axis};
use ort::session::Session;
#[cfg(feature = "aivmx")]
use std::io::Cursor;
use tokenizers::Tokenizer;
#[derive(PartialEq, Eq, Clone)]
@@ -69,6 +75,53 @@ impl TTSModelHolder {
self.models.iter().map(|m| m.ident.to_string()).collect()
}
#[cfg(feature = "aivmx")]
pub fn load_aivmx<I: Into<TTSIdent>, P: AsRef<[u8]>>(
&mut self,
ident: I,
aivmx_bytes: P,
) -> Result<()> {
let ident = ident.into();
if self.find_model(ident.clone()).is_err() {
let mut load = true;
if let Some(max) = self.max_loaded_models {
if self.models.iter().filter(|x| x.vits2.is_some()).count() >= max {
load = false;
}
}
let model = model::load_model(&aivmx_bytes, false)?;
let metadata = model.metadata()?;
if let Some(aivm_style_vectors) = metadata.custom("aivm_style_vectors")? {
let aivm_style_vectors = BASE64_STANDARD.decode(aivm_style_vectors)?;
let style_vectors = Cursor::new(&aivm_style_vectors);
let reader = npyz::NpyFile::new(style_vectors)?;
let style_vectors = {
let shape = reader.shape().to_vec();
let order = reader.order();
let data = reader.into_vec::<f32>()?;
let shape = match shape[..] {
[i1, i2] => [i1 as usize, i2 as usize],
_ => panic!("expected 2D array"),
};
let true_shape = shape.set_f(order == npyz::Order::Fortran);
ndarray::Array2::from_shape_vec(true_shape, data)?
};
drop(metadata);
self.models.push(TTSModel {
vits2: if load { Some(model) } else { None },
bytes: if self.max_loaded_models.is_some() {
Some(aivmx_bytes.as_ref().to_vec())
} else {
None
},
ident,
style_vectors,
})
}
}
Ok(())
}
/// Load a .sbv2 file binary
///
/// # Examples
@@ -229,6 +282,7 @@ impl TTSModelHolder {
ident: I,
text: &str,
style_id: i32,
speaker_id: i64,
options: SynthesizeOptions,
) -> Result<Vec<u8>> {
self.find_and_load_model(ident)?;
@@ -251,11 +305,14 @@ impl TTSModelHolder {
vits2,
bert_ori.to_owned(),
phones,
Array1::from_vec(vec![speaker_id]),
tones,
lang_ids,
style_vector.clone(),
options.sdp_ratio,
options.length_scale,
0.677,
0.8,
)?;
audios.push(audio.clone());
if i != texts.len() - 1 {
@@ -278,50 +335,18 @@ impl TTSModelHolder {
vits2,
bert_ori.to_owned(),
phones,
Array1::from_vec(vec![speaker_id]),
tones,
lang_ids,
style_vector,
options.sdp_ratio,
options.length_scale,
0.677,
0.8,
)?
};
tts_util::array_to_vec(audio_array)
}
/// Synthesize text to audio
///
/// # Note
/// This function is for low-level usage, use `easy_synthesize` for high-level usage.
#[allow(clippy::too_many_arguments)]
pub fn synthesize<I: Into<TTSIdent> + Copy>(
&mut self,
ident: I,
bert_ori: Array2<f32>,
phones: Array1<i64>,
tones: Array1<i64>,
lang_ids: Array1<i64>,
style_vector: Array1<f32>,
sdp_ratio: f32,
length_scale: f32,
) -> Result<Vec<u8>> {
self.find_and_load_model(ident)?;
let vits2 = self
.find_model(ident)?
.vits2
.as_mut()
.ok_or(Error::ModelNotFoundError(ident.into().to_string()))?;
let audio_array = model::synthesize(
vits2,
bert_ori.to_owned(),
phones,
tones,
lang_ids,
style_vector,
sdp_ratio,
length_scale,
)?;
tts_util::array_to_vec(audio_array)
}
}
/// Synthesize options
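Putting the tts.rs changes together: an .aivmx model is loaded straight from its ONNX bytes, with style vectors recovered from the base64-encoded npy blob stored under the aivm_style_vectors metadata key, and every synthesis call now names a speaker as well as a style. A minimal caller sketch, assuming the aivmx feature is enabled; the holder's construction is unchanged by this commit and omitted here, and the ident and file paths are placeholders:

    use sbv2_core::tts::{SynthesizeOptions, TTSModelHolder};

    fn synthesize_from_aivmx(holder: &mut TTSModelHolder) -> anyhow::Result<()> {
        // New in this commit: load a model directly from .aivmx bytes (requires the "aivmx" feature).
        holder.load_aivmx("some-model", std::fs::read("some-model.aivmx")?)?;
        // easy_synthesize now takes speaker_id right after style_id; here both are 0.
        let audio = holder.easy_synthesize("some-model", "こんにちは", 0, 0, SynthesizeOptions::default())?;
        std::fs::write("output.wav", audio)?;
        Ok(())
    }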