
Commit 90ce942

FEATURE: Add periodic problem checks for each LLM in use (#1020)
This feature adds a problem check that periodically tests whether the LLMs currently in use are still operational. If a model fails the test, it is surfaced to the admin so they can easily update its configuration.
1 parent 24b1078 commit 90ce942
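To make the behavior concrete, here is a hedged console sketch (not part of the commit): per the check class added below, call returns an empty array when every in-use LLM passes its test, and one Problem per failing model otherwise.

  # Rails console sketch; assumes the plugin is enabled and at least one LLM is configured.
  check = ProblemCheck::AiLlmStatus.new
  problems = check.call                 # => [] when all in-use LLMs respond correctly
  problems.each { |p| puts p.message }  # one entry per failing model otherwise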

File tree

5 files changed: +136 −0 lines changed

app/models/llm_model.rb

Lines changed: 5 additions & 0 deletions
@@ -13,6 +13,11 @@ class LlmModel < ActiveRecord::Base
   validates_presence_of :name, :api_key
   validates :max_prompt_tokens, numericality: { greater_than: 0 }
   validate :required_provider_params
+  scope :in_use,
+        -> do
+          model_ids = DiscourseAi::Configuration::LlmEnumerator.global_usage.keys
+          where(id: model_ids)
+        end
 
   def self.provider_params
     {
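The new in_use scope narrows LlmModel to the records that LlmEnumerator.global_usage reports as actively used by some feature. A minimal usage sketch, assuming at least one feature (e.g. summarization) points at a configured model:

  # Only models referenced by an enabled feature are returned.
  LlmModel.in_use.find_each { |model| puts model.display_name }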
Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+class ProblemCheck::AiLlmStatus < ProblemCheck
+  self.priority = "high"
+  self.perform_every = 6.hours
+
+  def call
+    llm_errors
+  end
+
+  def base_path
+    Discourse.base_path
+  end
+
+  private
+
+  def llm_errors
+    return [] if !SiteSetting.discourse_ai_enabled
+    LlmModel.in_use.find_each.filter_map do |model|
+      try_validate(model) { validator.run_test(model) }
+    end
+  end
+
+  def try_validate(model, &blk)
+    begin
+      blk.call
+      nil
+    rescue => e
+      error_message = parse_error_message(e.message)
+      message =
+        "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: base_path, model_name: model.display_name, model_id: model.id })}"
+
+      Problem.new(
+        message,
+        priority: "high",
+        identifier: "ai_llm_status",
+        target: model.id,
+        details: {
+          model_id: model.id,
+          model_name: model.display_name,
+          error: error_message,
+        },
+      )
+    end
+  end
+
+  def validator
+    @validator ||= DiscourseAi::Configuration::LlmValidator.new
+  end
+
+  def parse_error_message(message)
+    begin
+      JSON.parse(message)["message"]
+    rescue JSON::ParserError
+      message.to_s
+    end
+  end
+end
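One detail worth noting: provider errors often arrive as JSON bodies, so parse_error_message tries JSON.parse first and falls back to the raw string. An illustrative sketch (calling the private helper via send purely for demonstration):

  check = ProblemCheck::AiLlmStatus.new
  check.send(:parse_error_message, '{"message":"Invalid API key"}')  # => "Invalid API key"
  check.send(:parse_error_message, "connection refused")            # => "connection refused"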

config/locales/server.en.yml

Lines changed: 3 additions & 0 deletions
@@ -453,3 +453,6 @@ en:
       no_default_llm: The persona must have a default_llm defined.
       user_not_allowed: The user is not allowed to participate in the topic.
       prompt_message_length: The message %{idx} is over the 1000 character limit.
+  dashboard:
+    problem:
+      ai_llm_status: "The LLM model: %{model_name} is encountering issues. Please check the <a href='%{base_path}/admin/plugins/discourse-ai/ai-llms/%{model_id}'>model's configuration page</a>."
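For reference, a sketch of how the translation interpolates; the values here are illustrative, not from the commit:

  I18n.t(
    "dashboard.problem.ai_llm_status",
    base_path: "",             # empty for sites served from the root
    model_name: "gpt-4-turbo", # illustrative model name
    model_id: 42,              # illustrative model id
  )
  # => "The LLM model: gpt-4-turbo is encountering issues. Please check the
  #     <a href='/admin/plugins/discourse-ai/ai-llms/42'>model's configuration page</a>."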

plugin.rb

Lines changed: 2 additions & 0 deletions
@@ -75,6 +75,8 @@ def self.public_asset_path(name)
       DiscourseAi::AiModeration::EntryPoint.new,
     ].each { |a_module| a_module.inject_into(self) }
 
+    register_problem_check ProblemCheck::AiLlmStatus
+
     register_reviewable_type ReviewableAiChatMessage
     register_reviewable_type ReviewableAiPost
 
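Registering the check hands scheduling to Discourse core, which runs it on the interval declared by perform_every (6 hours here), so the plugin needs no extra job wiring of its own.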

Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+
+RSpec.describe ProblemCheck::AiLlmStatus do
+  subject(:check) { described_class.new }
+
+  fab!(:llm_model)
+
+  let(:post_url) { "https://api.openai.com/v1/chat/completions" }
+  let(:success_response) do
+    {
+      model: "gpt-4-turbo",
+      usage: {
+        max_prompt_tokens: 131_072,
+      },
+      choices: [
+        { message: { role: "assistant", content: "test" }, finish_reason: "stop", index: 0 },
+      ],
+    }.to_json
+  end
+
+  let(:error_response) do
+    { message: "API key error! Please check you have supplied the correct API key." }.to_json
+  end
+
+  before do
+    stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
+    SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
+    SiteSetting.ai_summarization_enabled = true
+  end
+
+  describe "#call" do
+    it "does nothing if discourse-ai plugin disabled" do
+      SiteSetting.discourse_ai_enabled = false
+      expect(check).to be_chill_about_it
+    end
+
+    context "with discourse-ai plugin enabled for the site" do
+      before { SiteSetting.discourse_ai_enabled = true }
+
+      it "returns a problem with an LLM model" do
+        stub_request(:post, post_url).to_return(status: 403, body: error_response, headers: {})
+        message =
+          "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: Discourse.base_path, model_name: llm_model.display_name, model_id: llm_model.id })}"
+
+        expect(described_class.new.call).to contain_exactly(
+          have_attributes(
+            identifier: "ai_llm_status",
+            target: llm_model.id,
+            priority: "high",
+            message: message,
+            details: {
+              model_id: llm_model.id,
+              model_name: llm_model.display_name,
+              error: JSON.parse(error_response)["message"],
+            },
+          ),
+        )
+      end
+
+      it "does not return a problem if the LLM models are working" do
+        stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
+        expect(check).to be_chill_about_it
+      end
+    end
+  end
+end
