Skip to content

Commit 8a1d206

Browse files
authored
tts : fix n_ubatch + make WavTokenizer cache-less (ggml-org#13713)
ggml-ci
1 parent 797990c commit 8a1d206

File tree

2 files changed: +5 additions, -2 deletions

src/llama-model.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13189,6 +13189,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
 13189 13189          case LLM_ARCH_JINA_BERT_V2:
 13190 13190          case LLM_ARCH_NOMIC_BERT:
 13191 13191          case LLM_ARCH_NOMIC_BERT_MOE:
       13192 +        case LLM_ARCH_WAVTOKENIZER_DEC:
 13192 13193              {
 13193 13194                  res = nullptr;
 13194 13195              } break;

tools/tts/tts.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -579,6 +579,8 @@ int main(int argc, char ** argv) {
 579 579
 580 580      params.model = params.vocoder.model;
 581 581      params.embedding = true;
     582 +    params.ctx_shift = false; // silence warning
     583 +    params.n_ubatch = params.n_batch;
 582 584
 583 585      common_init_result llama_init_cts = common_init_from_params(params);
 584 586

@@ -1020,8 +1022,8 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14
 1020 1022      }
 1021 1023      GGML_ASSERT(batch.n_tokens == n_codes);
 1022 1024
 1023      -    if (llama_decode(ctx_cts, batch) != 0) {
 1024      -        LOG_ERR("%s: llama_decode() failed\n", __func__);
      1025 +    if (llama_encode(ctx_cts, batch) != 0) {
      1026 +        LOG_ERR("%s: llama_encode() failed\n", __func__);
 1025 1027          return 1;
 1026 1028      }
 1027 1029
10271029

0 commit comments

Comments (0)