
Commit 78cb1ac

feat: Add Dynamic Configuration to ChatProfiles (#2401)
## Changes:
- Audio.enabled and MCP.enabled must now be explicitly enabled; previously they were inferred from the presence of on_audio_chunk and on_mcp_connect. This allows features to be enabled selectively per ChatProfile.
- Documentation update PR: Chainlit/docs#256

## ✅ Backend Implementation Complete
- ChainlitConfigOverrides class in config.py - allows specifying UI, features, and project overrides
- ChatProfile.config_overrides field - stores profile-specific configuration overrides
- /project/settings endpoint enhancement - applies config overrides based on the chat_profile parameter
- Proper serialization - removes config_overrides from profiles sent to the frontend (they are applied server-side)
- Pydantic dataclass rebuild - fixes forward reference issues

## ✅ Frontend Implementation Complete
- Enhanced useConfig hook - includes the chat_profile parameter in API calls
- Automatic re-fetching - clears the config when the chat profile changes to trigger a re-fetch with the new profile-specific settings

## 🎯 How It Works
- Default state: the frontend calls /project/settings?language=en-US → gets the base configuration
- Profile selection: the user selects "vision-assistant" → chatProfileState updates
- Config refresh: the frontend clears the config and calls /project/settings?language=en-US&chat_profile=vision-assistant
- Server merges: the backend finds the vision-assistant profile and applies its config_overrides to the base config
- Frontend receives: the merged configuration with vision-assistant's UI name, file upload settings, etc.

## Other:
- Added "UP007" to the ruff ignore list because it flagged valid uses of Optional[str].

<img width="474" height="147" alt="image" src="https://github.com/user-attachments/assets/cfbcf0d6-68e6-4bc6-aeee-539014a52b2d" />

Fixes #1502

---------

Co-authored-by: Aleksandr Vishniakov <[email protected]>
1 parent 501a503 commit 78cb1ac
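
To make the feature concrete, here is a minimal sketch of how an app author might attach profile-specific overrides, using the classes introduced in this commit (`ChainlitConfigOverrides`, `ChatProfile.config_overrides`); the profile names and override values are illustrative, not taken from the diff.

```python
# Illustrative sketch: profile names and override values are made up.
import chainlit as cl
from chainlit.config import (
    AudioFeature,
    ChainlitConfigOverrides,
    FeaturesSettings,
    UISettings,
)


@cl.set_chat_profiles
async def chat_profiles():
    return [
        # Base profile: no overrides, uses config.toml as-is.
        cl.ChatProfile(
            name="default",
            markdown_description="Standard assistant.",
        ),
        # Profile whose overrides are merged server-side by /project/settings.
        cl.ChatProfile(
            name="vision-assistant",
            markdown_description="Assistant with audio enabled and a custom UI name.",
            config_overrides=ChainlitConfigOverrides(
                ui=UISettings(name="Vision Assistant"),
                features=FeaturesSettings(audio=AudioFeature(enabled=True)),
            ),
        ),
    ]
```

Because `/project/settings` applies the overrides server-side, the frontend only ever receives the merged configuration for the selected profile.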

File tree: 11 files changed (+610 additions, -71 deletions)


backend/chainlit/__init__.py (1 addition, 0 deletions)

@@ -121,6 +121,7 @@ def acall(self):
         "instrument_openai": "chainlit.openai",
         "instrument_mistralai": "chainlit.mistralai",
         "SemanticKernelFilter": "chainlit.semantic_kernel",
+        "server": "chainlit.server",
     }
 )

backend/chainlit/config.py (38 additions, 39 deletions)

@@ -17,9 +17,7 @@
 )

 import tomli
-from dataclasses_json import DataClassJsonMixin
-from pydantic import Field
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field
 from starlette.datastructures import Headers

 from chainlit.data.base import BaseDataLayer
@@ -119,9 +117,15 @@
 max_size_mb = 500

 [features.audio]
+# Enable audio features
+enabled = false
 # Sample rate of the audio
 sample_rate = 24000

+[features.mcp]
+# Enable Model Context Protocol (MCP) features
+enabled = false
+
 [features.mcp.sse]
 enabled = true

@@ -207,8 +211,7 @@
 DEFAULT_ROOT_PATH = ""


-@dataclass()
-class RunSettings:
+class RunSettings(BaseModel):
     # Name of the module (python file) used in the run command
     module_name: Optional[str] = None
     host: str = DEFAULT_HOST
@@ -223,64 +226,54 @@ class RunSettings:
     ci: bool = False


-@dataclass()
-class PaletteOptions(DataClassJsonMixin):
+class PaletteOptions(BaseModel):
     main: Optional[str] = ""
     light: Optional[str] = ""
     dark: Optional[str] = ""


-@dataclass()
-class TextOptions(DataClassJsonMixin):
+class TextOptions(BaseModel):
     primary: Optional[str] = ""
     secondary: Optional[str] = ""


-@dataclass()
-class Palette(DataClassJsonMixin):
+class Palette(BaseModel):
     primary: Optional[PaletteOptions] = None
     background: Optional[str] = ""
     paper: Optional[str] = ""
     text: Optional[TextOptions] = None


-@dataclass
-class SpontaneousFileUploadFeature(DataClassJsonMixin):
+class SpontaneousFileUploadFeature(BaseModel):
     enabled: Optional[bool] = None
     accept: Optional[Union[List[str], Dict[str, List[str]]]] = None
     max_files: Optional[int] = None
     max_size_mb: Optional[int] = None


-@dataclass
-class AudioFeature(DataClassJsonMixin):
+class AudioFeature(BaseModel):
     sample_rate: int = 24000
     enabled: bool = False


-@dataclass
-class McpSseFeature(DataClassJsonMixin):
+class McpSseFeature(BaseModel):
     enabled: bool = True


-@dataclass
-class McpStreamableHttpFeature(DataClassJsonMixin):
+class McpStreamableHttpFeature(BaseModel):
     enabled: bool = True


-@dataclass
-class McpStdioFeature(DataClassJsonMixin):
+class McpStdioFeature(BaseModel):
     enabled: bool = True
     allowed_executables: Optional[list[str]] = None


-@dataclass
-class SlackFeature(DataClassJsonMixin):
+class SlackFeature(BaseModel):
     reaction_on_message_received: bool = False


-@dataclass
-class McpFeature(DataClassJsonMixin):
+class McpFeature(BaseModel):
     enabled: bool = False
     sse: McpSseFeature = Field(default_factory=McpSseFeature)
     streamable_http: McpStreamableHttpFeature = Field(
@@ -289,8 +282,7 @@ class McpFeature(DataClassJsonMixin):
     stdio: McpStdioFeature = Field(default_factory=McpStdioFeature)


-@dataclass()
-class FeaturesSettings(DataClassJsonMixin):
+class FeaturesSettings(BaseModel):
     spontaneous_file_upload: Optional[SpontaneousFileUploadFeature] = None
     audio: Optional[AudioFeature] = Field(default_factory=AudioFeature)
     mcp: McpFeature = Field(default_factory=McpFeature)
@@ -302,16 +294,14 @@ class FeaturesSettings(DataClassJsonMixin):
     edit_message: bool = True


-@dataclass
-class HeaderLink(DataClassJsonMixin):
+class HeaderLink(BaseModel):
     name: str
     icon_url: str
     url: str
     display_name: Optional[str] = None


-@dataclass()
-class UISettings(DataClassJsonMixin):
+class UISettings(BaseModel):
     name: str
     description: str = ""
     cot: Literal["hidden", "tool_call", "full"] = "full"
@@ -344,8 +334,7 @@
     header_links: Optional[List[HeaderLink]] = None


-@dataclass()
-class CodeSettings:
+class CodeSettings(BaseModel):
     # App action functions
     action_callbacks: Dict[str, Callable[["Action"], Any]]

@@ -393,8 +382,7 @@ class CodeSettings:
     data_layer: Optional[Callable[[], BaseDataLayer]] = None


-@dataclass()
-class ProjectSettings(DataClassJsonMixin):
+class ProjectSettings(BaseModel):
     allow_origins: List[str] = Field(default_factory=lambda: ["*"])
     # Socket.io client transports option
     transports: Optional[List[str]] = None
@@ -411,10 +399,21 @@
     cache: bool = False


-@dataclass()
-class ChainlitConfig:
+class ChainlitConfigOverrides(BaseModel):
+    """Configuration overrides that can be applied to specific chat profiles."""
+
+    ui: Optional[UISettings] = None
+    features: Optional[FeaturesSettings] = None
+    project: Optional[ProjectSettings] = None
+
+
+class ChainlitConfig(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True, revalidate_instances="always"
+    )
+
     # Directory where the Chainlit project is located
-    root = APP_ROOT
+    root: str = APP_ROOT
     # Chainlit server URL. Used only for cloud features
     chainlit_server: str
     run: RunSettings
@@ -510,7 +509,7 @@ def load_module(target: str, force_refresh: bool = False):
     site_package_dirs = site.getsitepackages()

     # Clear the modules related to the app from sys.modules
-    for module_name, module in list(sys.modules.items()):
+    for module_name, module in sys.modules.items():
         if (
             hasattr(module, "__file__")
             and module.__file__
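
As a side note on the dataclasses_json-to-Pydantic migration above, here is a small sketch (not part of the diff) of how a partially populated `ChainlitConfigOverrides` serializes: only the top-level sections that were explicitly set survive `model_dump(exclude_none=True)`, which is why the merge in `/project/settings` leaves untouched sections (ui, project) alone. The field values below are made up; the class names come from config.py above.

```python
# Illustrative only: values are made up, class names come from chainlit.config.
from chainlit.config import AudioFeature, ChainlitConfigOverrides, FeaturesSettings

overrides = ChainlitConfigOverrides(
    features=FeaturesSettings(audio=AudioFeature(enabled=True, sample_rate=16000))
)

# ui and project were never set, so exclude_none=True drops them entirely;
# only the "features" section is carried into the merge.
partial = overrides.model_dump(exclude_none=True)
assert "ui" not in partial
assert partial["features"]["audio"]["enabled"] is True
```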

backend/chainlit/server.py (35 additions, 12 deletions)

@@ -48,6 +48,7 @@
     DEFAULT_HOST,
     FILES_DIRECTORY,
     PACKAGE_ROOT,
+    ChainlitConfig,
     config,
     load_module,
     public_dir,
@@ -777,42 +778,64 @@ async def project_settings(
     language: str = Query(
         default="en-US", description="Language code", pattern=_language_pattern
     ),
+    chat_profile: Optional[str] = Query(
+        default=None, description="Current chat profile name"
+    ),
 ):
     """Return project settings. This is called by the UI before the establishing the websocket connection."""

     # Load the markdown file based on the provided language
-
     markdown = get_markdown_str(config.root, language)

     profiles = []
     if config.code.set_chat_profiles:
         chat_profiles = await config.code.set_chat_profiles(current_user)
         if chat_profiles:
-            profiles = [p.to_dict() for p in chat_profiles]
+            # Custom serialization to handle ChainlitConfigOverrides
+            for p in chat_profiles:
+                profile_dict = p.to_dict()
+                # Remove config_overrides from the serialized profile since it's used server-side only
+                if "config_overrides" in profile_dict:
+                    del profile_dict["config_overrides"]
+                profiles.append(profile_dict)

     starters = []
     if config.code.set_starters:
         starters = await config.code.set_starters(current_user)
         if starters:
             starters = [s.to_dict() for s in starters]

-    if config.code.on_audio_chunk:
-        config.features.audio.enabled = True
-
-    if config.code.on_mcp_connect:
-        config.features.mcp.enabled = True
-
     debug_url = None
     data_layer = get_data_layer()

     if data_layer and config.run.debug:
         debug_url = await data_layer.build_debug_url()

+    config_with_overrides = config
+
+    # Apply profile-specific configuration overrides
+    if chat_profile and config.code.set_chat_profiles:
+        # Find the current chat profile and apply overrides
+        chat_profiles = await config.code.set_chat_profiles(current_user)
+        if chat_profiles:
+            current_profile = next(
+                (p for p in chat_profiles if p.name == chat_profile), None
+            )
+            if current_profile and current_profile.config_overrides:
+                config_with_overrides = ChainlitConfig.model_validate(
+                    config.model_copy(
+                        update=current_profile.config_overrides.model_dump(
+                            exclude_none=True
+                        ),
+                        deep=True,
+                    )
+                )
+
     return JSONResponse(
         content={
-            "ui": config.ui.to_dict(),
-            "features": config.features.to_dict(),
-            "userEnv": config.project.user_env,
+            "ui": config_with_overrides.ui.model_dump(),
+            "features": config_with_overrides.features.model_dump(),
+            "userEnv": config_with_overrides.project.user_env,
             "dataPersistence": get_data_layer() is not None,
             "threadResumable": bool(config.code.on_chat_resume),
             "markdown": markdown,
@@ -1148,7 +1171,7 @@ async def connect_mcp(
             status_code=401,
         )

-    mcp_enabled = config.code.on_mcp_connect is not None
+    mcp_enabled = config.features.mcp.enabled
     if mcp_enabled:
         if payload.name in session.mcp_sessions:
             old_client_session, old_exit_stack = session.mcp_sessions[payload.name]
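
The `model_copy(update=...)` plus `model_validate(...)` combination above is the heart of the merge. Below is a standalone sketch with toy models (none of these names exist in Chainlit) showing the same pattern, and why `revalidate_instances="always"` was added to `ChainlitConfig`'s `model_config`.

```python
# Toy models only; Config, Features and Overrides are hypothetical stand-ins.
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field


class Features(BaseModel):
    audio_enabled: bool = False


class Config(BaseModel):
    # Mirrors ChainlitConfig above: without revalidate_instances="always",
    # model_validate() would pass an existing instance through untouched and
    # the dict injected by model_copy(update=...) would never be coerced back
    # into a Features model.
    model_config = ConfigDict(revalidate_instances="always")

    name: str = "base"
    features: Features = Field(default_factory=Features)


class Overrides(BaseModel):
    name: Optional[str] = None
    features: Optional[Features] = None


base = Config()
overrides = Overrides(features=Features(audio_enabled=True))

# Same two-step merge as the endpoint: copy with only the explicitly-set
# fields, then re-validate so nested dicts become models again.
merged = Config.model_validate(
    base.model_copy(update=overrides.model_dump(exclude_none=True), deep=True)
)
assert merged.name == "base"
assert merged.features.audio_enabled is True
```

The re-validation step is what keeps the merged object a fully typed `ChainlitConfig` rather than a copy with raw dicts in place of nested settings.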

backend/chainlit/socket.py (3 additions, 3 deletions)

@@ -326,7 +326,7 @@ async def audio_start(sid):
     session = WebsocketSession.require(sid)

     context = init_ws_context(session)
-    if config.code.on_audio_start:
+    if config.features.audio.enabled:
         connected = bool(await config.code.on_audio_start())
         connection_state = "on" if connected else "off"
         await context.emitter.update_audio_connection(connection_state)
@@ -339,7 +339,7 @@ async def audio_chunk(sid, payload: InputAudioChunkPayload):

     init_ws_context(session)

-    if config.code.on_audio_chunk:
+    if config.features.audio.enabled:
         asyncio.create_task(config.code.on_audio_chunk(InputAudioChunk(**payload)))


@@ -355,7 +355,7 @@
         session.has_first_interaction = True
         asyncio.create_task(context.emitter.init_thread("audio"))

-        if config.code.on_audio_end:
+        if config.features.audio.enabled:
            await config.code.on_audio_end()

    except asyncio.CancelledError:
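
Since the audio handlers above are now gated on `config.features.audio.enabled` rather than on the mere presence of a callback, apps have to opt in explicitly. A rough sketch of the app side (callback bodies are placeholders):

```python
import chainlit as cl


@cl.on_audio_start
async def on_audio_start():
    # Returning True accepts the incoming audio connection.
    return True


@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.InputAudioChunk):
    # Handle the streamed audio. With this commit the handler only fires when
    # [features.audio] enabled = true is set in config.toml (or supplied via a
    # profile's config_overrides), not merely because the callback exists.
    ...
```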

backend/chainlit/types.py (12 additions, 9 deletions)

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from enum import Enum
 from pathlib import Path
 from typing import (
@@ -36,8 +38,8 @@ class ThreadDict(TypedDict):
     userIdentifier: Optional[str]
     tags: Optional[List[str]]
     metadata: Optional[Dict]
-    steps: List["StepDict"]
-    elements: Optional[List["ElementDict"]]
+    steps: List[StepDict]
+    elements: Optional[List[ElementDict]]


 class Pagination(BaseModel):
@@ -46,9 +48,9 @@ class Pagination(BaseModel):


 class ThreadFilter(BaseModel):
-    feedback: Optional[Literal[0, 1]] = None
-    userId: Optional[str] = None
-    search: Optional[str] = None
+    feedback: Literal[0, 1] | None = None
+    userId: str | None = None
+    search: str | None = None


 @dataclass
@@ -65,7 +67,7 @@ def to_dict(self):
         }

     @classmethod
-    def from_dict(cls, page_info_dict: Dict) -> "PageInfo":
+    def from_dict(cls, page_info_dict: Dict) -> PageInfo:
         hasNextPage = page_info_dict.get("hasNextPage", False)
         startCursor = page_info_dict.get("startCursor", None)
         endCursor = page_info_dict.get("endCursor", None)
@@ -100,7 +102,7 @@ def to_dict(self):
     @classmethod
     def from_dict(
         cls, paginated_response_dict: Dict, the_class: HasFromDict[T]
-    ) -> "PaginatedResponse[T]":
+    ) -> PaginatedResponse[T]:
         pageInfo = PageInfo.from_dict(paginated_response_dict.get("pageInfo", {}))

         data = [the_class.from_dict(d) for d in paginated_response_dict.get("data", [])]
@@ -159,7 +161,7 @@ class FileDict(TypedDict):


 class MessagePayload(TypedDict):
-    message: "StepDict"
+    message: StepDict
     fileReferences: Optional[List[FileReference]]


@@ -251,7 +253,7 @@ class ConnectStreamableHttpMCPRequest(BaseModel):
     name: str
     url: str
     # Optional HTTP headers to forward to the MCP transport (e.g. Authorization)
-    headers: Optional[Dict[str, str]] = None
+    headers: Dict[str, str] | None = None


 ConnectMCPRequest = Union[
@@ -293,6 +295,7 @@ class ChatProfile(DataClassJsonMixin):
     icon: Optional[str] = None
     default: bool = False
     starters: Optional[List[Starter]] = None
+    config_overrides: Any = None


 FeedbackStrategy = Literal["BINARY"]
