Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -233,12 +233,21 @@ Drop-in wrappers for every major AI framework:
import { withSupermemory } from "@supermemory/tools/ai-sdk";
const model = withSupermemory(openai("gpt-4o"), "user_123");

// MiniMax (OpenAI-compatible)
import OpenAI from "openai";
import { withSupermemory } from "@supermemory/tools/minimax";
const minimax = new OpenAI({
apiKey: process.env.MINIMAX_API_KEY,
baseURL: "https://api.minimax.io/v1",
});
const client = withSupermemory(minimax, "user_123", { mode: "full" });

// Mastra
import { withSupermemory } from "@supermemory/tools/mastra";
const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" }));
```

**Vercel AI SDK** · **LangChain** · **LangGraph** · **OpenAI Agents SDK** · **Mastra** · **Agno** · **Claude Memory Tool** · **n8n**
**Vercel AI SDK** · **LangChain** · **LangGraph** · **OpenAI Agents SDK** · **MiniMax** · **Mastra** · **Agno** · **Claude Memory Tool** · **n8n**

### Search modes

Expand Down
1 change: 1 addition & 0 deletions packages/tools/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
"./claude-memory": "./dist/claude-memory.js",
"./mastra": "./dist/mastra.js",
"./openai": "./dist/openai/index.js",
"./minimax": "./dist/minimax/index.js",
"./package.json": "./package.json"
},
"repository": {
Expand Down
150 changes: 150 additions & 0 deletions packages/tools/src/minimax/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
import type OpenAI from "openai"
import {
createOpenAIMiddleware,
type OpenAIMiddlewareOptions,
} from "../openai/middleware"

/**
 * MiniMax model definitions.
 *
 * MiniMax provides OpenAI-compatible chat models via `https://api.minimax.io/v1`.
 * The `id` values are sent verbatim as the `model` field of chat-completion
 * requests.
 * NOTE(review): verify these IDs against MiniMax's current model list — they
 * cannot be confirmed from this file alone.
 */
export const MINIMAX_MODELS = [
  { id: "MiniMax-M2.7", name: "MiniMax M2.7" },
  { id: "MiniMax-M2.7-highspeed", name: "MiniMax M2.7 Highspeed" },
] as const

/** Union of the known MiniMax model IDs (e.g. `"MiniMax-M2.7"`), derived from `MINIMAX_MODELS`. */
export type MiniMaxModelId = (typeof MINIMAX_MODELS)[number]["id"]

/** Default MiniMax API base URL (international). */
export const MINIMAX_BASE_URL = "https://api.minimax.io/v1"

/**
 * Middleware options extended with MiniMax connection settings.
 *
 * NOTE(review): `withSupermemory` in this module accepts plain
 * `OpenAIMiddlewareOptions` and never reads `minimaxApiKey` or
 * `minimaxBaseUrl` — the documented env-var/base-URL fallbacks are not
 * implemented here. Confirm whether another consumer uses this interface, or
 * whether `withSupermemory` was meant to take it.
 */
export interface MiniMaxSupermemoryOptions extends OpenAIMiddlewareOptions {
  /**
   * MiniMax API key. Falls back to `process.env.MINIMAX_API_KEY`.
   * (Fallback not applied anywhere in this module — see interface note.)
   */
  minimaxApiKey?: string
  /**
   * MiniMax API base URL. Defaults to `https://api.minimax.io/v1`.
   * (Default not applied anywhere in this module — see interface note.)
   */
  minimaxBaseUrl?: string
}

/**
 * Clamps a sampling temperature to MiniMax's accepted range (0, 1.0].
 *
 * MiniMax rejects temperature = 0; the minimum accepted value is a small
 * positive number, so non-positive inputs are raised to 0.01 and values
 * above 1 are capped at 1.0.
 *
 * @param temperature - Requested sampling temperature; `undefined`/`null`
 *   selects the default of 1.0.
 * @returns A temperature guaranteed to lie in (0, 1.0].
 */
export function clampTemperature(temperature?: number): number {
  // `== null` covers both null and undefined. The original `=== null` check
  // is a TS2367 ("types have no overlap") error under strictNullChecks for a
  // `number | undefined` parameter, and could never match.
  if (temperature == null) return 1.0
  if (temperature <= 0) return 0.01
  if (temperature > 1) return 1.0
  return temperature
}

/**
 * Wraps a MiniMax-configured OpenAI client with SuperMemory middleware so
 * that relevant memories are injected into requests automatically.
 *
 * MiniMax exposes an OpenAI-compatible API, so any `OpenAI` client pointed
 * at `https://api.minimax.io/v1` can be passed straight through to the
 * shared OpenAI middleware.
 *
 * @param openaiClient - An OpenAI client instance configured with MiniMax's
 *   base URL and API key:
 * ```typescript
 * import OpenAI from "openai"
 * const client = new OpenAI({
 *   apiKey: process.env.MINIMAX_API_KEY,
 *   baseURL: "https://api.minimax.io/v1",
 * })
 * ```
 * @param containerTag - Container tag/identifier for memory search
 *   (e.g., user ID, project ID)
 * @param options - Optional middleware configuration; unspecified fields
 *   fall back to `verbose: false`, `mode: "profile"`, `addMemory: "never"`.
 *
 * @returns The client wrapped with SuperMemory middleware for both the Chat
 *   Completions and Responses APIs
 *
 * @example
 * ```typescript
 * import OpenAI from "openai"
 * import { withSupermemory } from "@supermemory/tools/minimax"
 *
 * const minimax = new OpenAI({
 *   apiKey: process.env.MINIMAX_API_KEY,
 *   baseURL: "https://api.minimax.io/v1",
 * })
 *
 * const client = withSupermemory(minimax, "user-123", {
 *   mode: "full",
 *   addMemory: "always",
 * })
 *
 * const response = await client.chat.completions.create({
 *   model: "MiniMax-M2.7",
 *   messages: [{ role: "user", content: "What's my favorite color?" }],
 *   temperature: 0.7,
 * })
 * ```
 *
 * @throws {Error} When the SUPERMEMORY_API_KEY environment variable is not set
 */
export function withSupermemory(
  openaiClient: OpenAI,
  containerTag: string,
  options?: OpenAIMiddlewareOptions,
) {
  // Fail fast: the middleware cannot reach SuperMemory without a key.
  if (!process.env.SUPERMEMORY_API_KEY) {
    throw new Error("SUPERMEMORY_API_KEY is not set")
  }

  // Pass options through with the same defaults the middleware is expected
  // to see, without introducing one local per field.
  return createOpenAIMiddleware(openaiClient, containerTag, {
    conversationId: options?.conversationId,
    verbose: options?.verbose ?? false,
    mode: options?.mode ?? "profile",
    addMemory: options?.addMemory ?? "never",
    baseUrl: options?.baseUrl,
  })
}

// Re-export the shared OpenAI middleware/tool surface so consumers of
// `@supermemory/tools/minimax` get the full toolkit from a single import
// (MiniMax's API is OpenAI-compatible, so the OpenAI tools apply as-is).
export type { OpenAIMiddlewareOptions }
// Result types returned by the memory/document tool functions below.
export type {
  MemorySearchResult,
  MemoryAddResult,
  ProfileResult,
  DocumentListResult,
  DocumentDeleteResult,
  DocumentAddResult,
  MemoryForgetResult,
} from "../openai/tools"
// Tool factories, executors, and schemas shared with the OpenAI integration.
export {
  createSearchMemoriesFunction,
  createAddMemoryFunction,
  createGetProfileFunction,
  createDocumentListFunction,
  createDocumentDeleteFunction,
  createDocumentAddFunction,
  createMemoryForgetFunction,
  supermemoryTools,
  getToolDefinitions,
  createToolCallExecutor,
  createToolCallsExecutor,
  createSearchMemoriesTool,
  createAddMemoryTool,
  createGetProfileTool,
  createDocumentListTool,
  createDocumentDeleteTool,
  createDocumentAddTool,
  createMemoryForgetTool,
  memoryToolSchemas,
} from "../openai/tools"
92 changes: 92 additions & 0 deletions packages/tools/test/minimax/integration.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
/**
* Integration tests for the MiniMax + Supermemory integration.
*
* These tests verify that MiniMax models work correctly through the
* OpenAI-compatible API when wrapped with Supermemory memory middleware.
*
* Requires:
* - MINIMAX_API_KEY: MiniMax API key
* - SUPERMEMORY_API_KEY: Supermemory API key
*/

import { describe, it, expect, vi } from "vitest"
import OpenAI from "openai"
import {
withSupermemory,
MINIMAX_BASE_URL,
clampTemperature,
} from "../../src/minimax"
import "dotenv/config"

// API keys pulled from the environment; empty string when absent.
const MINIMAX_API_KEY = process.env.MINIMAX_API_KEY || ""
const SUPERMEMORY_API_KEY = process.env.SUPERMEMORY_API_KEY || ""

// Integration tests only run when both keys are present.
const shouldRunIntegration = !!MINIMAX_API_KEY && !!SUPERMEMORY_API_KEY

describe.skipIf(!shouldRunIntegration)(
  "Integration: MiniMax + Supermemory",
  () => {
    // Builds a fresh OpenAI client pointed at MiniMax's
    // OpenAI-compatible endpoint.
    const newMiniMaxClient = () =>
      new OpenAI({ apiKey: MINIMAX_API_KEY, baseURL: MINIMAX_BASE_URL })

    it(
      "should complete a basic chat via MiniMax API",
      async () => {
        const minimax = newMiniMaxClient()

        const completion = await minimax.chat.completions.create({
          model: "MiniMax-M2.7",
          messages: [
            { role: "user", content: 'Say exactly "hello minimax"' },
          ],
          max_tokens: 20,
          temperature: clampTemperature(0.7),
        })

        expect(completion.choices).toBeDefined()
        expect(completion.choices.length).toBeGreaterThan(0)
        expect(completion.choices[0]?.message?.content).toBeTruthy()
      },
      30000,
    )

    it(
      "should work with MiniMax-M2.7-highspeed model",
      async () => {
        const minimax = newMiniMaxClient()

        const completion = await minimax.chat.completions.create({
          model: "MiniMax-M2.7-highspeed",
          messages: [{ role: "user", content: "What is 2+2?" }],
          max_tokens: 20,
          temperature: clampTemperature(0.5),
        })

        expect(completion.choices).toBeDefined()
        expect(completion.choices[0]?.message?.content).toBeTruthy()
      },
      30000,
    )

    it(
      "should work with supermemory middleware wrapping MiniMax client",
      async () => {
        const wrapped = withSupermemory(
          newMiniMaxClient(),
          "minimax-integration-test",
          { mode: "profile" },
        )

        // The wrapped client should still have the chat API
        expect(wrapped.chat.completions.create).toBeDefined()
      },
      10000,
    )
  },
)
Loading