Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,10 @@ test-e2e: build-mock-backend prepare-e2e run-e2e-image
$(MAKE) teardown-e2e
docker rmi localai-tests

# Run only the SpiritLM end-to-end specs (Ginkgo label "SpiritLM") against the
# shared mock backend -- no real SpiritLM backend or model is required.
# NOTE(review): unlike test-e2e above, this does not build/run the e2e Docker
# image, so no teardown-e2e step follows -- confirm that is intentional.
test-e2e-spiritlm: build-mock-backend
@echo 'Running SpiritLM e2e tests (mock backend)'
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="SpiritLM" --flake-attempts $(TEST_FLAKES) -v ./tests/e2e/...

teardown-e2e:
rm -rf $(TEST_DIR) || true
docker stop $$(docker ps -q --filter ancestor=localai-tests)
Expand All @@ -247,6 +251,10 @@ test-stablediffusion: prepare-test
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models BACKENDS_PATH=$(abspath ./)/backends \
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)

# Integration tests for the real SpiritLM Python backend (Ginkgo label
# "spiritlm", lowercase -- distinct from the mock-backend "SpiritLM" e2e label).
# BACKENDS_PATH points at the in-tree Python backends (backend/python), not the
# ./backends directory used by test-stablediffusion above.
# SPIRITLM_CHECKPOINTS_DIR is forwarded to the backend; per E2E.md, without the
# downloaded Spirit LM model the specs skip rather than fail.
# NOTE(review): no CONFIG_FILE is set here, unlike test-stablediffusion -- confirm
# the spiritlm specs do not need one.
test-spiritlm: prepare-test
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures MODELS_PATH=$(abspath ./)/test-models BACKENDS_PATH=$(abspath ./)/backend/python SPIRITLM_CHECKPOINTS_DIR=$(SPIRITLM_CHECKPOINTS_DIR) \
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="spiritlm" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)

# Run the stores integration tests (Ginkgo label "stores") under tests/integration.
test-stores:
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stores" --flake-attempts $(TEST_FLAKES) -v -r tests/integration

Expand Down Expand Up @@ -312,7 +320,7 @@ protoc:
.PHONY: protogen-go
protogen-go: protoc install-go-tools
mkdir -p pkg/grpc/proto
./protoc --experimental_allow_proto3_optional -Ibackend/ --go_out=pkg/grpc/proto/ --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/proto/ --go-grpc_opt=paths=source_relative \
PATH="$$(go env GOPATH)/bin:$$PATH" ./protoc --experimental_allow_proto3_optional -Ibackend/ --go_out=pkg/grpc/proto/ --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/proto/ --go-grpc_opt=paths=source_relative \
backend/backend.proto

.PHONY: protogen-go-clean
Expand Down
21 changes: 21 additions & 0 deletions backend/index.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -594,6 +594,27 @@
nvidia-cuda-13: "cuda13-nemo"
nvidia-cuda-12: "cuda12-nemo"
icon: https://www.nvidia.com/favicon.ico
# Gallery entry for Meta Spirit LM: one 7B model covering text generation,
# TTS and ASR (anchor &spiritlm so the entry can be reused elsewhere in the index).
- &spiritlm
urls:
- https://github.com/facebookresearch/spiritlm
description: |
Meta Spirit LM: interleaved spoken and written language model. Supports text generation, text-to-speech (TTS), and automatic speech recognition (ASR) in a single 7B model.
tags:
- text-to-text
- text-to-speech
- TTS
- speech-recognition
- ASR
- LLM
- multimodal
license: fair-noncommercial
name: "spiritlm"
alias: "spiritlm"
# Capability keys map hardware profiles to concrete backend build variants.
# NOTE(review): only CPU and CUDA 12 variants are declared (cf. the nemo entry
# above, which also has a cuda13 variant) -- confirm that is intentional.
capabilities:
nvidia: "cuda12-spiritlm"
default: "cpu-spiritlm"
nvidia-cuda-12: "cuda12-spiritlm"
icon: https://ai.meta.com/favicon.ico
- &voxcpm
urls:
- https://github.com/ModelBest/VoxCPM
Expand Down
1 change: 1 addition & 0 deletions backend/python/backend.proto
69 changes: 69 additions & 0 deletions backend/python/spiritlm/E2E.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# SpiritLM E2E tests

SpiritLM is covered by two test layers:

1. **`tests/e2e/` (recommended for CI)** – Full e2e suite using the shared mock backend. File: `tests/e2e/spiritlm_e2e_test.go`, label: `SpiritLM`. No real SpiritLM backend or model required.
2. **`core/http/app_test.go`** – Integration-style tests under context **SpiritLM backend e2e** (label: `spiritlm`). Requires the Python SpiritLM backend and fixtures.

## How to run

From the repo root:

**E2E suite (mock backend, no Python backend needed):**

```bash
make test-e2e-spiritlm
```

Or run the full e2e suite (includes SpiritLM):

```bash
make test-e2e
```

**Integration tests (real SpiritLM backend):**

```bash
make test-spiritlm
```

This sets `BACKENDS_PATH=./backend/python` and `TEST_DIR=./test-dir`, forwards `SPIRITLM_CHECKPOINTS_DIR` to the backend, runs `prepare-test`, then runs Ginkgo with `--label-filter="spiritlm"`.

For the **transcription** test you need `test-dir/audio.wav` (e.g. run `make test-models/testmodel.ggml` once to download it, or set `TEST_DIR` to a directory that contains `audio.wav`).

## Backend setup

1. **Protos**
Generate Python gRPC stubs (required for the backend to start):
```bash
cd backend/python/spiritlm && bash protogen.sh
```
Or run the full install (which also creates the venv and installs deps):
```bash
make -C backend/python/spiritlm
```

2. **Full e2e pass (all 3 specs pass)**
- Install the backend: `make -C backend/python/spiritlm`
- Download the Spirit LM model from [Meta AI Spirit LM](https://ai.meta.com/resources/models-and-libraries/spirit-lm-downloads/) and place it so the checkpoint directory layout is:
```
<SPIRITLM_CHECKPOINTS_DIR>/
spiritlm_model/
spirit-lm-base-7b/ # model files (config.json, tokenizer, etc.)
```
- Run the tests with the checkpoint dir set:
```bash
SPIRITLM_CHECKPOINTS_DIR=/path/to/checkpoints make test-spiritlm
```
- Ensure LocalAI runs the backend with that env (e.g. export it before `make test-spiritlm`, or configure the backend to pass it through).

Without the model, the backend starts and responds to Health, but LoadModel fails; the e2e specs **skip** with a message pointing here, and the suite still **passes** (0 failed).

## Requirements

- Linux (tests skip on other OS)
- SpiritLM backend runnable: `backend/python/spiritlm/run.sh` must exist (satisfied in-tree)
- For backend to start: Python protos generated (`backend_pb2.py`, `backend_pb2_grpc.py`) and venv with grpc/spiritlm (via `make -C backend/python/spiritlm`)
- For all 3 specs to pass: Spirit LM model under `SPIRITLM_CHECKPOINTS_DIR` as above

Tests are skipped if `BACKENDS_PATH` is unset or `BACKENDS_PATH/spiritlm/run.sh` is missing.
23 changes: 23 additions & 0 deletions backend/python/spiritlm/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Makefile for the SpiritLM Python backend.
# Default target installs the backend (venv, dependencies, protos) via install.sh.

.PHONY: spiritlm
spiritlm:
	bash install.sh

# Regenerate the Python gRPC stubs (backend_pb2.py, backend_pb2_grpc.py) from
# the shared backend proto. Counterpart of protogen-clean below; E2E.md
# documents running protogen.sh directly from this directory.
.PHONY: protogen
protogen:
	bash protogen.sh

# Install (if needed) and start the backend.
.PHONY: run
run: spiritlm
	@echo "Running spiritlm..."
	bash run.sh
	@echo "spiritlm run."

# Install (if needed) and run the backend's Python test suite.
.PHONY: test
test: spiritlm
	@echo "Testing spiritlm..."
	bash test.sh
	@echo "spiritlm tested."

# Remove the generated gRPC stubs only.
.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

# Full cleanup: stubs, virtualenv and Python bytecode cache.
# $(RM) -r (= rm -f -r) used for consistency with protogen-clean.
.PHONY: clean
clean: protogen-clean
	$(RM) -r venv __pycache__
Loading